sde_rotator_r3.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
  6. #include <linux/platform_device.h>
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/file.h>
  10. #include <linux/delay.h>
  11. #include <linux/debugfs.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dma-buf.h>
  15. #include <linux/clk.h>
  16. #include <linux/clk/qcom.h>
  17. #include "sde_rotator_core.h"
  18. #include "sde_rotator_util.h"
  19. #include "sde_rotator_smmu.h"
  20. #include "sde_rotator_r3.h"
  21. #include "sde_rotator_r3_internal.h"
  22. #include "sde_rotator_r3_hwio.h"
  23. #include "sde_rotator_r3_debug.h"
  24. #include "sde_rotator_trace.h"
  25. #include "sde_rotator_debug.h"
  26. #include "sde_rotator_vbif.h"
  27. #define RES_UHD (3840*2160)
  28. #define MS_TO_US(t) ((t) * USEC_PER_MSEC)
  29. /* traffic shaping clock ticks = finish_time x 19.2MHz */
  30. #define TRAFFIC_SHAPE_CLKTICK_14MS 268800
  31. #define TRAFFIC_SHAPE_CLKTICK_12MS 230400
  32. #define TRAFFIC_SHAPE_VSYNC_CLK 19200000
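/*
 * Worked example for the tick counts above: at the 19.2 MHz reference,
 * 0.014 s * 19,200,000 Hz = 268,800 ticks and
 * 0.012 s * 19,200,000 Hz = 230,400 ticks, matching the two constants.
 */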
  33. /* wait for at most 2 vsync for lowest refresh rate (24hz) */
  34. #define KOFF_TIMEOUT (42 * 8)
  35. /*
  36. * When in sbuf mode, select a much longer wait, to allow the other driver
  37. * to detect timeouts and abort if necessary.
  38. */
  39. #define KOFF_TIMEOUT_SBUF (10000)
  40. /* default stream buffer headroom in lines */
  41. #define DEFAULT_SBUF_HEADROOM 20
  42. #define DEFAULT_UBWC_MALSIZE 0
  43. #define DEFAULT_UBWC_SWIZZLE 0
  44. #define DEFAULT_MAXLINEWIDTH 4096
  45. /* stride alignment requirement for avoiding partial writes */
  46. #define PARTIAL_WRITE_ALIGNMENT 0x1F
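/*
 * The value 0x1F masks the low five bits of an address or stride, so the
 * alignment implied here is 32 bytes (an inference from the name and
 * value, not stated elsewhere in this file).
 */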
  47. /* Macro for constructing the REGDMA command */
  48. #define SDE_REGDMA_WRITE(p, off, data) \
  49. do { \
  50. SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
  51. (u32)(data));\
  52. writel_relaxed_no_log( \
  53. (REGDMA_OP_REGWRITE | \
  54. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  55. p); \
  56. p += sizeof(u32); \
  57. writel_relaxed_no_log(data, p); \
  58. p += sizeof(u32); \
  59. } while (0)
  60. #define SDE_REGDMA_MODIFY(p, off, mask, data) \
  61. do { \
  62. SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
  63. (u32)(data));\
  64. writel_relaxed_no_log( \
  65. (REGDMA_OP_REGMODIFY | \
  66. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  67. p); \
  68. p += sizeof(u32); \
  69. writel_relaxed_no_log(mask, p); \
  70. p += sizeof(u32); \
  71. writel_relaxed_no_log(data, p); \
  72. p += sizeof(u32); \
  73. } while (0)
  74. #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
  75. do { \
  76. SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
  77. (u32)(len));\
  78. writel_relaxed_no_log( \
  79. (REGDMA_OP_BLKWRITE_INC | \
  80. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  81. p); \
  82. p += sizeof(u32); \
  83. writel_relaxed_no_log(len, p); \
  84. p += sizeof(u32); \
  85. } while (0)
  86. #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
  87. do { \
  88. SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
  89. writel_relaxed_no_log(data, p); \
  90. p += sizeof(u32); \
  91. } while (0)
  92. #define SDE_REGDMA_READ(p, data) \
  93. do { \
  94. data = readl_relaxed_no_log(p); \
  95. p += sizeof(u32); \
  96. } while (0)
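/*
 * Each of the REGDMA helpers above appends 32-bit words to the command
 * buffer through the write pointer 'p'. For example,
 * SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, val) emits two words,
 * (REGDMA_OP_REGWRITE | (ROT_SSPP_SRC_YSTRIDE0 & REGDMA_ADDR_OFFSET_MASK))
 * followed by 'val', and advances wrptr by 2 * sizeof(u32). None of the
 * macros bounds-check 'p'; callers must keep it inside a valid REGDMA
 * segment.
 */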
  97. /* Macro for directly accessing mapped registers */
  98. #define SDE_ROTREG_WRITE(base, off, data) \
  99. do { \
  100. SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
  101. , (u32)(data));\
  102. writel_relaxed(data, (base + (off))); \
  103. } while (0)
  104. #define SDE_ROTREG_READ(base, off) \
  105. readl_relaxed(base + (off))
  106. #define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
  107. (((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
  108. static const u32 sde_hw_rotator_v3_inpixfmts[] = {
  109. SDE_PIX_FMT_XRGB_8888,
  110. SDE_PIX_FMT_ARGB_8888,
  111. SDE_PIX_FMT_ABGR_8888,
  112. SDE_PIX_FMT_RGBA_8888,
  113. SDE_PIX_FMT_BGRA_8888,
  114. SDE_PIX_FMT_RGBX_8888,
  115. SDE_PIX_FMT_BGRX_8888,
  116. SDE_PIX_FMT_XBGR_8888,
  117. SDE_PIX_FMT_RGBA_5551,
  118. SDE_PIX_FMT_ARGB_1555,
  119. SDE_PIX_FMT_ABGR_1555,
  120. SDE_PIX_FMT_BGRA_5551,
  121. SDE_PIX_FMT_BGRX_5551,
  122. SDE_PIX_FMT_RGBX_5551,
  123. SDE_PIX_FMT_XBGR_1555,
  124. SDE_PIX_FMT_XRGB_1555,
  125. SDE_PIX_FMT_ARGB_4444,
  126. SDE_PIX_FMT_RGBA_4444,
  127. SDE_PIX_FMT_BGRA_4444,
  128. SDE_PIX_FMT_ABGR_4444,
  129. SDE_PIX_FMT_RGBX_4444,
  130. SDE_PIX_FMT_XRGB_4444,
  131. SDE_PIX_FMT_BGRX_4444,
  132. SDE_PIX_FMT_XBGR_4444,
  133. SDE_PIX_FMT_RGB_888,
  134. SDE_PIX_FMT_BGR_888,
  135. SDE_PIX_FMT_RGB_565,
  136. SDE_PIX_FMT_BGR_565,
  137. SDE_PIX_FMT_Y_CB_CR_H2V2,
  138. SDE_PIX_FMT_Y_CR_CB_H2V2,
  139. SDE_PIX_FMT_Y_CR_CB_GH2V2,
  140. SDE_PIX_FMT_Y_CBCR_H2V2,
  141. SDE_PIX_FMT_Y_CRCB_H2V2,
  142. SDE_PIX_FMT_Y_CBCR_H1V2,
  143. SDE_PIX_FMT_Y_CRCB_H1V2,
  144. SDE_PIX_FMT_Y_CBCR_H2V1,
  145. SDE_PIX_FMT_Y_CRCB_H2V1,
  146. SDE_PIX_FMT_YCBYCR_H2V1,
  147. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  148. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  149. SDE_PIX_FMT_RGBA_8888_UBWC,
  150. SDE_PIX_FMT_RGBX_8888_UBWC,
  151. SDE_PIX_FMT_RGB_565_UBWC,
  152. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  153. SDE_PIX_FMT_RGBA_1010102,
  154. SDE_PIX_FMT_RGBX_1010102,
  155. SDE_PIX_FMT_ARGB_2101010,
  156. SDE_PIX_FMT_XRGB_2101010,
  157. SDE_PIX_FMT_BGRA_1010102,
  158. SDE_PIX_FMT_BGRX_1010102,
  159. SDE_PIX_FMT_ABGR_2101010,
  160. SDE_PIX_FMT_XBGR_2101010,
  161. SDE_PIX_FMT_RGBA_1010102_UBWC,
  162. SDE_PIX_FMT_RGBX_1010102_UBWC,
  163. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  164. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  165. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  166. };
  167. static const u32 sde_hw_rotator_v3_outpixfmts[] = {
  168. SDE_PIX_FMT_XRGB_8888,
  169. SDE_PIX_FMT_ARGB_8888,
  170. SDE_PIX_FMT_ABGR_8888,
  171. SDE_PIX_FMT_RGBA_8888,
  172. SDE_PIX_FMT_BGRA_8888,
  173. SDE_PIX_FMT_RGBX_8888,
  174. SDE_PIX_FMT_BGRX_8888,
  175. SDE_PIX_FMT_XBGR_8888,
  176. SDE_PIX_FMT_RGBA_5551,
  177. SDE_PIX_FMT_ARGB_1555,
  178. SDE_PIX_FMT_ABGR_1555,
  179. SDE_PIX_FMT_BGRA_5551,
  180. SDE_PIX_FMT_BGRX_5551,
  181. SDE_PIX_FMT_RGBX_5551,
  182. SDE_PIX_FMT_XBGR_1555,
  183. SDE_PIX_FMT_XRGB_1555,
  184. SDE_PIX_FMT_ARGB_4444,
  185. SDE_PIX_FMT_RGBA_4444,
  186. SDE_PIX_FMT_BGRA_4444,
  187. SDE_PIX_FMT_ABGR_4444,
  188. SDE_PIX_FMT_RGBX_4444,
  189. SDE_PIX_FMT_XRGB_4444,
  190. SDE_PIX_FMT_BGRX_4444,
  191. SDE_PIX_FMT_XBGR_4444,
  192. SDE_PIX_FMT_RGB_888,
  193. SDE_PIX_FMT_BGR_888,
  194. SDE_PIX_FMT_RGB_565,
  195. SDE_PIX_FMT_BGR_565,
  196. /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
  197. /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
  198. /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
  199. SDE_PIX_FMT_Y_CBCR_H2V2,
  200. SDE_PIX_FMT_Y_CRCB_H2V2,
  201. SDE_PIX_FMT_Y_CBCR_H1V2,
  202. SDE_PIX_FMT_Y_CRCB_H1V2,
  203. SDE_PIX_FMT_Y_CBCR_H2V1,
  204. SDE_PIX_FMT_Y_CRCB_H2V1,
  205. /* SDE_PIX_FMT_YCBYCR_H2V1 */
  206. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  207. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  208. SDE_PIX_FMT_RGBA_8888_UBWC,
  209. SDE_PIX_FMT_RGBX_8888_UBWC,
  210. SDE_PIX_FMT_RGB_565_UBWC,
  211. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  212. SDE_PIX_FMT_RGBA_1010102,
  213. SDE_PIX_FMT_RGBX_1010102,
  214. /* SDE_PIX_FMT_ARGB_2101010 */
  215. /* SDE_PIX_FMT_XRGB_2101010 */
  216. SDE_PIX_FMT_BGRA_1010102,
  217. SDE_PIX_FMT_BGRX_1010102,
  218. /* SDE_PIX_FMT_ABGR_2101010 */
  219. /* SDE_PIX_FMT_XBGR_2101010 */
  220. SDE_PIX_FMT_RGBA_1010102_UBWC,
  221. SDE_PIX_FMT_RGBX_1010102_UBWC,
  222. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  223. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  224. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  225. };
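/*
 * The commented-out entries in the output table above (and in the v4
 * output table below) are formats that appear in the corresponding
 * *_inpixfmts list but are not supported on the writeback side, i.e. they
 * are fetch-only; presumably they are left in place as comments so the
 * input and output tables remain easy to compare.
 */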
  226. static const u32 sde_hw_rotator_v4_inpixfmts[] = {
  227. SDE_PIX_FMT_XRGB_8888,
  228. SDE_PIX_FMT_ARGB_8888,
  229. SDE_PIX_FMT_ABGR_8888,
  230. SDE_PIX_FMT_RGBA_8888,
  231. SDE_PIX_FMT_BGRA_8888,
  232. SDE_PIX_FMT_RGBX_8888,
  233. SDE_PIX_FMT_BGRX_8888,
  234. SDE_PIX_FMT_XBGR_8888,
  235. SDE_PIX_FMT_RGBA_5551,
  236. SDE_PIX_FMT_ARGB_1555,
  237. SDE_PIX_FMT_ABGR_1555,
  238. SDE_PIX_FMT_BGRA_5551,
  239. SDE_PIX_FMT_BGRX_5551,
  240. SDE_PIX_FMT_RGBX_5551,
  241. SDE_PIX_FMT_XBGR_1555,
  242. SDE_PIX_FMT_XRGB_1555,
  243. SDE_PIX_FMT_ARGB_4444,
  244. SDE_PIX_FMT_RGBA_4444,
  245. SDE_PIX_FMT_BGRA_4444,
  246. SDE_PIX_FMT_ABGR_4444,
  247. SDE_PIX_FMT_RGBX_4444,
  248. SDE_PIX_FMT_XRGB_4444,
  249. SDE_PIX_FMT_BGRX_4444,
  250. SDE_PIX_FMT_XBGR_4444,
  251. SDE_PIX_FMT_RGB_888,
  252. SDE_PIX_FMT_BGR_888,
  253. SDE_PIX_FMT_RGB_565,
  254. SDE_PIX_FMT_BGR_565,
  255. SDE_PIX_FMT_Y_CB_CR_H2V2,
  256. SDE_PIX_FMT_Y_CR_CB_H2V2,
  257. SDE_PIX_FMT_Y_CR_CB_GH2V2,
  258. SDE_PIX_FMT_Y_CBCR_H2V2,
  259. SDE_PIX_FMT_Y_CRCB_H2V2,
  260. SDE_PIX_FMT_Y_CBCR_H1V2,
  261. SDE_PIX_FMT_Y_CRCB_H1V2,
  262. SDE_PIX_FMT_Y_CBCR_H2V1,
  263. SDE_PIX_FMT_Y_CRCB_H2V1,
  264. SDE_PIX_FMT_YCBYCR_H2V1,
  265. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  266. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  267. SDE_PIX_FMT_RGBA_8888_UBWC,
  268. SDE_PIX_FMT_RGBX_8888_UBWC,
  269. SDE_PIX_FMT_RGB_565_UBWC,
  270. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  271. SDE_PIX_FMT_RGBA_1010102,
  272. SDE_PIX_FMT_RGBX_1010102,
  273. SDE_PIX_FMT_ARGB_2101010,
  274. SDE_PIX_FMT_XRGB_2101010,
  275. SDE_PIX_FMT_BGRA_1010102,
  276. SDE_PIX_FMT_BGRX_1010102,
  277. SDE_PIX_FMT_ABGR_2101010,
  278. SDE_PIX_FMT_XBGR_2101010,
  279. SDE_PIX_FMT_RGBA_1010102_UBWC,
  280. SDE_PIX_FMT_RGBX_1010102_UBWC,
  281. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  282. SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
  283. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  284. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  285. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  286. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  287. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  288. SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
  289. SDE_PIX_FMT_XRGB_8888_TILE,
  290. SDE_PIX_FMT_ARGB_8888_TILE,
  291. SDE_PIX_FMT_ABGR_8888_TILE,
  292. SDE_PIX_FMT_XBGR_8888_TILE,
  293. SDE_PIX_FMT_RGBA_8888_TILE,
  294. SDE_PIX_FMT_BGRA_8888_TILE,
  295. SDE_PIX_FMT_RGBX_8888_TILE,
  296. SDE_PIX_FMT_BGRX_8888_TILE,
  297. SDE_PIX_FMT_RGBA_1010102_TILE,
  298. SDE_PIX_FMT_RGBX_1010102_TILE,
  299. SDE_PIX_FMT_ARGB_2101010_TILE,
  300. SDE_PIX_FMT_XRGB_2101010_TILE,
  301. SDE_PIX_FMT_BGRA_1010102_TILE,
  302. SDE_PIX_FMT_BGRX_1010102_TILE,
  303. SDE_PIX_FMT_ABGR_2101010_TILE,
  304. SDE_PIX_FMT_XBGR_2101010_TILE,
  305. };
  306. static const u32 sde_hw_rotator_v4_outpixfmts[] = {
  307. SDE_PIX_FMT_XRGB_8888,
  308. SDE_PIX_FMT_ARGB_8888,
  309. SDE_PIX_FMT_ABGR_8888,
  310. SDE_PIX_FMT_RGBA_8888,
  311. SDE_PIX_FMT_BGRA_8888,
  312. SDE_PIX_FMT_RGBX_8888,
  313. SDE_PIX_FMT_BGRX_8888,
  314. SDE_PIX_FMT_XBGR_8888,
  315. SDE_PIX_FMT_RGBA_5551,
  316. SDE_PIX_FMT_ARGB_1555,
  317. SDE_PIX_FMT_ABGR_1555,
  318. SDE_PIX_FMT_BGRA_5551,
  319. SDE_PIX_FMT_BGRX_5551,
  320. SDE_PIX_FMT_RGBX_5551,
  321. SDE_PIX_FMT_XBGR_1555,
  322. SDE_PIX_FMT_XRGB_1555,
  323. SDE_PIX_FMT_ARGB_4444,
  324. SDE_PIX_FMT_RGBA_4444,
  325. SDE_PIX_FMT_BGRA_4444,
  326. SDE_PIX_FMT_ABGR_4444,
  327. SDE_PIX_FMT_RGBX_4444,
  328. SDE_PIX_FMT_XRGB_4444,
  329. SDE_PIX_FMT_BGRX_4444,
  330. SDE_PIX_FMT_XBGR_4444,
  331. SDE_PIX_FMT_RGB_888,
  332. SDE_PIX_FMT_BGR_888,
  333. SDE_PIX_FMT_RGB_565,
  334. SDE_PIX_FMT_BGR_565,
  335. /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
  336. /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
  337. /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
  338. SDE_PIX_FMT_Y_CBCR_H2V2,
  339. SDE_PIX_FMT_Y_CRCB_H2V2,
  340. SDE_PIX_FMT_Y_CBCR_H1V2,
  341. SDE_PIX_FMT_Y_CRCB_H1V2,
  342. SDE_PIX_FMT_Y_CBCR_H2V1,
  343. SDE_PIX_FMT_Y_CRCB_H2V1,
  344. /* SDE_PIX_FMT_YCBYCR_H2V1 */
  345. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  346. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  347. SDE_PIX_FMT_RGBA_8888_UBWC,
  348. SDE_PIX_FMT_RGBX_8888_UBWC,
  349. SDE_PIX_FMT_RGB_565_UBWC,
  350. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  351. SDE_PIX_FMT_RGBA_1010102,
  352. SDE_PIX_FMT_RGBX_1010102,
  353. SDE_PIX_FMT_ARGB_2101010,
  354. SDE_PIX_FMT_XRGB_2101010,
  355. SDE_PIX_FMT_BGRA_1010102,
  356. SDE_PIX_FMT_BGRX_1010102,
  357. SDE_PIX_FMT_ABGR_2101010,
  358. SDE_PIX_FMT_XBGR_2101010,
  359. SDE_PIX_FMT_RGBA_1010102_UBWC,
  360. SDE_PIX_FMT_RGBX_1010102_UBWC,
  361. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  362. SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
  363. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  364. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  365. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  366. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  367. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  368. SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
  369. SDE_PIX_FMT_XRGB_8888_TILE,
  370. SDE_PIX_FMT_ARGB_8888_TILE,
  371. SDE_PIX_FMT_ABGR_8888_TILE,
  372. SDE_PIX_FMT_XBGR_8888_TILE,
  373. SDE_PIX_FMT_RGBA_8888_TILE,
  374. SDE_PIX_FMT_BGRA_8888_TILE,
  375. SDE_PIX_FMT_RGBX_8888_TILE,
  376. SDE_PIX_FMT_BGRX_8888_TILE,
  377. SDE_PIX_FMT_RGBA_1010102_TILE,
  378. SDE_PIX_FMT_RGBX_1010102_TILE,
  379. SDE_PIX_FMT_ARGB_2101010_TILE,
  380. SDE_PIX_FMT_XRGB_2101010_TILE,
  381. SDE_PIX_FMT_BGRA_1010102_TILE,
  382. SDE_PIX_FMT_BGRX_1010102_TILE,
  383. SDE_PIX_FMT_ABGR_2101010_TILE,
  384. SDE_PIX_FMT_XBGR_2101010_TILE,
  385. };
  386. static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
  387. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  388. SDE_PIX_FMT_Y_CBCR_H2V2,
  389. SDE_PIX_FMT_Y_CRCB_H2V2,
  390. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  391. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  392. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  393. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  394. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  395. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  396. };
  397. static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
  398. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  399. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  400. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  401. };
  402. static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
  403. {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
  404. {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
  405. {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
  406. };
  407. static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
  408. /*
  409. * rottop - 0xA8850
  410. */
  411. /* REGDMA */
  412. { 0XA8850, 0, 0 },
  413. { 0XA8850, 0, 1 },
  414. { 0XA8850, 0, 2 },
  415. { 0XA8850, 0, 3 },
  416. { 0XA8850, 0, 4 },
  417. /* ROT_WB */
  418. { 0XA8850, 1, 0 },
  419. { 0XA8850, 1, 1 },
  420. { 0XA8850, 1, 2 },
  421. { 0XA8850, 1, 3 },
  422. { 0XA8850, 1, 4 },
  423. { 0XA8850, 1, 5 },
  424. { 0XA8850, 1, 6 },
  425. { 0XA8850, 1, 7 },
  426. /* UBWC_DEC */
  427. { 0XA8850, 2, 0 },
  428. /* UBWC_ENC */
  429. { 0XA8850, 3, 0 },
  430. /* ROT_FETCH_0 */
  431. { 0XA8850, 4, 0 },
  432. { 0XA8850, 4, 1 },
  433. { 0XA8850, 4, 2 },
  434. { 0XA8850, 4, 3 },
  435. { 0XA8850, 4, 4 },
  436. { 0XA8850, 4, 5 },
  437. { 0XA8850, 4, 6 },
  438. { 0XA8850, 4, 7 },
  439. /* ROT_FETCH_1 */
  440. { 0XA8850, 5, 0 },
  441. { 0XA8850, 5, 1 },
  442. { 0XA8850, 5, 2 },
  443. { 0XA8850, 5, 3 },
  444. { 0XA8850, 5, 4 },
  445. { 0XA8850, 5, 5 },
  446. { 0XA8850, 5, 6 },
  447. { 0XA8850, 5, 7 },
  448. /* ROT_FETCH_2 */
  449. { 0XA8850, 6, 0 },
  450. { 0XA8850, 6, 1 },
  451. { 0XA8850, 6, 2 },
  452. { 0XA8850, 6, 3 },
  453. { 0XA8850, 6, 4 },
  454. { 0XA8850, 6, 5 },
  455. { 0XA8850, 6, 6 },
  456. { 0XA8850, 6, 7 },
  457. /* ROT_FETCH_3 */
  458. { 0XA8850, 7, 0 },
  459. { 0XA8850, 7, 1 },
  460. { 0XA8850, 7, 2 },
  461. { 0XA8850, 7, 3 },
  462. { 0XA8850, 7, 4 },
  463. { 0XA8850, 7, 5 },
  464. { 0XA8850, 7, 6 },
  465. { 0XA8850, 7, 7 },
  466. /* ROT_FETCH_4 */
  467. { 0XA8850, 8, 0 },
  468. { 0XA8850, 8, 1 },
  469. { 0XA8850, 8, 2 },
  470. { 0XA8850, 8, 3 },
  471. { 0XA8850, 8, 4 },
  472. { 0XA8850, 8, 5 },
  473. { 0XA8850, 8, 6 },
  474. { 0XA8850, 8, 7 },
  475. /* ROT_UNPACK_0*/
  476. { 0XA8850, 9, 0 },
  477. { 0XA8850, 9, 1 },
  478. { 0XA8850, 9, 2 },
  479. { 0XA8850, 9, 3 },
  480. };
  481. static struct sde_rot_regdump sde_rot_r3_regdump[] = {
  482. { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
  483. { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
  484. { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
  485. { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
  486. SDE_ROT_REGDUMP_READ },
  487. /*
  488. * Need to perform a SW reset to REGDMA in order to access the
  489. * REGDMA RAM, especially if REGDMA is waiting for Rotator IDLE.
  490. * The REGDMA RAM should be dumped last.
  491. */
  492. { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
  493. SDE_ROT_REGDUMP_WRITE, 1 },
  494. { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
  495. SDE_ROT_REGDUMP_READ },
  496. { "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
  497. SDE_ROT_REGDUMP_VBIF },
  498. { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
  499. SDE_ROT_REGDUMP_WRITE, 0 },
  500. };
  501. struct sde_rot_cdp_params {
  502. bool enable;
  503. struct sde_mdp_format_params *fmt;
  504. u32 offset;
  505. };
  506. /* Invalid software timestamp value for initialization */
  507. #define SDE_REGDMA_SWTS_INVALID (~0)
  508. /**
  509. * __sde_hw_rotator_get_timestamp - obtain rotator current timestamp
  510. * @rot: rotator context
  511. * @q_id: regdma queue id (low/high)
  512. * @return: current timestamp
  513. */
  514. static u32 __sde_hw_rotator_get_timestamp(struct sde_hw_rotator *rot, u32 q_id)
  515. {
  516. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  517. u32 ts;
  518. if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
  519. if (q_id == ROT_QUEUE_HIGH_PRIORITY)
  520. ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_0);
  521. else
  522. ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_1);
  523. } else {
  524. ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
  525. if (q_id == ROT_QUEUE_LOW_PRIORITY)
  526. ts >>= SDE_REGDMA_SWTS_SHIFT;
  527. }
  528. return ts & SDE_REGDMA_SWTS_MASK;
  529. }
  530. /**
  531. * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
  532. * Also, clear rotator/regdma irq enable masks.
  533. * @rot: Pointer to hw rotator
  534. */
  535. static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
  536. {
  537. SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
  538. atomic_read(&rot->irq_enabled));
  539. if (!atomic_read(&rot->irq_enabled)) {
  540. SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
  541. return;
  542. }
  543. if (!atomic_dec_return(&rot->irq_enabled)) {
  544. if (rot->mode == ROT_REGDMA_OFF)
  545. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
  546. else
  547. SDE_ROTREG_WRITE(rot->mdss_base,
  548. REGDMA_CSR_REGDMA_INT_EN, 0);
  549. /* disable irq after last pending irq is handled, if any */
  550. synchronize_irq(rot->irq_num);
  551. disable_irq_nosync(rot->irq_num);
  552. }
  553. }
  554. /**
  555. * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
  556. * @ts_curr: current software timestamp
  557. * @ts_prev: previous software timestamp
  558. * @return: the amount ts_curr is ahead of ts_prev
  559. */
  560. static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
  561. {
  562. u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
  563. return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
  564. }
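/*
 * Worked example, assuming a 16-bit timestamp field (i.e.
 * SDE_REGDMA_SWTS_MASK == 0xFFFF and SDE_REGDMA_SWTS_SHIFT == 16; the
 * actual values come from sde_rotator_r3_internal.h):
 * ts_curr = 0x0002, ts_prev = 0xFFFE gives diff = 0x0004, which
 * sign-extends to +4, so the counter wrap is handled correctly;
 * ts_curr = 0xFFFE, ts_prev = 0x0002 gives diff = 0xFFFC, which
 * sign-extends to -4, i.e. curr is behind prev.
 */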
  565. /*
  566. * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
  567. * @irq: Interrupt number
  568. * @ptr: Pointer to private handle provided during registration
  569. *
  570. * This function services rotator interrupt and wakes up waiting client
  571. * with pending rotation requests already submitted to h/w.
  572. */
  573. static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
  574. {
  575. struct sde_hw_rotator *rot = ptr;
  576. struct sde_hw_rotator_context *ctx;
  577. irqreturn_t ret = IRQ_NONE;
  578. u32 isr;
  579. isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
  580. SDEROT_DBG("intr_status = %8.8x\n", isr);
  581. if (isr & ROT_DONE_MASK) {
  582. sde_hw_rotator_disable_irq(rot);
  583. SDEROT_DBG("Notify rotator complete\n");
  584. /* Normal rotator only 1 session, no need to lookup */
  585. ctx = rot->rotCtx[0][0];
  586. WARN_ON(ctx == NULL);
  587. complete_all(&ctx->rot_comp);
  588. spin_lock(&rot->rotisr_lock);
  589. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
  590. ROT_DONE_CLEAR);
  591. spin_unlock(&rot->rotisr_lock);
  592. ret = IRQ_HANDLED;
  593. }
  594. return ret;
  595. }
  596. /*
  597. * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
  598. * @irq: Interrupt number
  599. * @ptr: Pointer to private handle provided during registration
  600. *
  601. * This function services rotator interrupt, decoding the source of
  602. * events (high/low priority queue), and wakes up all waiting clients
  603. * with pending rotation requests already submitted to h/w.
  604. */
  605. static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
  606. {
  607. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  608. struct sde_hw_rotator *rot = ptr;
  609. struct sde_hw_rotator_context *ctx, *tmp;
  610. irqreturn_t ret = IRQ_NONE;
  611. u32 isr, isr_tmp;
  612. u32 ts;
  613. u32 q_id;
  614. isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
  615. /* acknowledge interrupt before reading latest timestamp */
  616. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
  617. SDEROT_DBG("intr_status = %8.8x\n", isr);
  618. /* Any REGDMA status, including error and watchdog timer, should
  619. * trigger and wake up waiting thread
  620. */
  621. if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
  622. spin_lock(&rot->rotisr_lock);
  623. /*
  624. * Obtain rotator context based on timestamp from regdma
  625. * and low/high interrupt status
  626. */
  627. if (isr & REGDMA_INT_HIGH_MASK) {
  628. q_id = ROT_QUEUE_HIGH_PRIORITY;
  629. } else if (isr & REGDMA_INT_LOW_MASK) {
  630. q_id = ROT_QUEUE_LOW_PRIORITY;
  631. } else {
  632. SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
  633. goto done_isr_handle;
  634. }
  635. ts = __sde_hw_rotator_get_timestamp(rot, q_id);
  636. /*
  637. * Timestamp packet is not available in sbuf mode.
  638. * Simulate timestamp update in the handler instead.
  639. */
  640. if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
  641. list_empty(&rot->sbuf_ctx[q_id]))
  642. goto skip_sbuf;
  643. ctx = NULL;
  644. isr_tmp = isr;
  645. list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
  646. u32 mask;
  647. mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
  648. REGDMA_INT_0_MASK;
  649. if (isr_tmp & mask) {
  650. isr_tmp &= ~mask;
  651. ctx = tmp;
  652. ts = ctx->timestamp;
  653. rot->ops.update_ts(rot, ctx->q_id, ts);
  654. SDEROT_DBG("update swts:0x%X\n", ts);
  655. }
  656. SDEROT_EVTLOG(isr, tmp->timestamp);
  657. }
  658. if (ctx == NULL)
  659. SDEROT_ERR("invalid swts ctx\n");
  660. skip_sbuf:
  661. ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
  662. /*
  663. * Wake up all waiting context from the current and previous
  664. * SW Timestamp.
  665. */
  666. while (ctx &&
  667. sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
  668. ctx->last_regdma_isr_status = isr;
  669. ctx->last_regdma_timestamp = ts;
  670. SDEROT_DBG(
  671. "regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
  672. wake_up_all(&ctx->regdma_waitq);
  673. ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
  674. ctx = rot->rotCtx[q_id]
  675. [ts & SDE_HW_ROT_REGDMA_SEG_MASK];
  676. };
  677. done_isr_handle:
  678. spin_unlock(&rot->rotisr_lock);
  679. ret = IRQ_HANDLED;
  680. } else if (isr & REGDMA_INT_ERR_MASK) {
  681. /*
  682. * For REGDMA Err, we save the isr info and wake up
  683. * all waiting contexts
  684. */
  685. int i, j;
  686. SDEROT_ERR(
  687. "regdma err isr:%X, wake up all waiting contexts\n",
  688. isr);
  689. spin_lock(&rot->rotisr_lock);
  690. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  691. for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
  692. ctx = rot->rotCtx[i][j];
  693. if (ctx && ctx->last_regdma_isr_status == 0) {
  694. ts = __sde_hw_rotator_get_timestamp(
  695. rot, i);
  696. ctx->last_regdma_isr_status = isr;
  697. ctx->last_regdma_timestamp = ts;
  698. wake_up_all(&ctx->regdma_waitq);
  699. SDEROT_DBG("Wake rotctx[%d][%d]:%pK\n",
  700. i, j, ctx);
  701. }
  702. }
  703. }
  704. spin_unlock(&rot->rotisr_lock);
  705. ret = IRQ_HANDLED;
  706. }
  707. return ret;
  708. }
  709. /**
  710. * sde_hw_rotator_pending_hwts - Check if the given context is still pending
  711. * @rot: Pointer to hw rotator
  712. * @ctx: Pointer to rotator context
  713. * @phwts: Pointer to returned reference hw timestamp, optional
  714. * @return: true if context has pending requests
  715. */
  716. static int sde_hw_rotator_pending_hwts(struct sde_hw_rotator *rot,
  717. struct sde_hw_rotator_context *ctx, u32 *phwts)
  718. {
  719. u32 hwts;
  720. int ts_diff;
  721. bool pending;
  722. if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID) {
  723. if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
  724. hwts = SDE_ROTREG_READ(rot->mdss_base,
  725. ROTTOP_ROT_CNTR_1);
  726. else
  727. hwts = SDE_ROTREG_READ(rot->mdss_base,
  728. ROTTOP_ROT_CNTR_0);
  729. } else {
  730. hwts = ctx->last_regdma_timestamp;
  731. }
  732. hwts &= SDE_REGDMA_SWTS_MASK;
  733. ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, hwts);
  734. if (phwts)
  735. *phwts = hwts;
  736. pending = (ts_diff > 0) ? true : false;
  737. SDEROT_DBG("ts:0x%x, queue_id:%d, hwts:0x%x, pending:%d\n",
  738. ctx->timestamp, ctx->q_id, hwts, pending);
  739. SDEROT_EVTLOG(ctx->timestamp, hwts, ctx->q_id, ts_diff);
  740. return pending;
  741. }
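/*
 * Note on the return value above: "pending" means the context's own
 * timestamp is strictly ahead of the last timestamp retired by the
 * hardware counter (or recorded by the ISR), so a context whose timestamp
 * equals the retired value is treated as complete.
 */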
  742. /**
  743. * sde_hw_rotator_update_hwts - update hw timestamp with given value
  744. * @rot: Pointer to hw rotator
  745. * @q_id: rotator queue id
  746. * @hwts: new hw timestamp
  747. */
  748. static void sde_hw_rotator_update_hwts(struct sde_hw_rotator *rot,
  749. u32 q_id, u32 hwts)
  750. {
  751. if (q_id == ROT_QUEUE_LOW_PRIORITY)
  752. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_1, hwts);
  753. else
  754. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_0, hwts);
  755. }
  756. /**
  757. * sde_hw_rotator_pending_swts - Check if the given context is still pending
  758. * @rot: Pointer to hw rotator
  759. * @ctx: Pointer to rotator context
  760. * @pswts: Pointer to returned reference software timestamp, optional
  761. * @return: true if context has pending requests
  762. */
  763. static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
  764. struct sde_hw_rotator_context *ctx, u32 *pswts)
  765. {
  766. u32 swts;
  767. int ts_diff;
  768. bool pending;
  769. if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
  770. swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
  771. else
  772. swts = ctx->last_regdma_timestamp;
  773. if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
  774. swts >>= SDE_REGDMA_SWTS_SHIFT;
  775. swts &= SDE_REGDMA_SWTS_MASK;
  776. ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
  777. if (pswts)
  778. *pswts = swts;
  779. pending = (ts_diff > 0) ? true : false;
  780. SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
  781. ctx->timestamp, ctx->q_id, swts, pending);
  782. SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
  783. return pending;
  784. }
  785. /**
  786. * sde_hw_rotator_update_swts - update software timestamp with given value
  787. * @rot: Pointer to hw rotator
  788. * @q_id: rotator queue id
  789. * @swts: new software timestamp
  790. */
  791. static void sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
  792. u32 q_id, u32 swts)
  793. {
  794. u32 mask = SDE_REGDMA_SWTS_MASK;
  795. swts &= SDE_REGDMA_SWTS_MASK;
  796. if (q_id == ROT_QUEUE_LOW_PRIORITY) {
  797. swts <<= SDE_REGDMA_SWTS_SHIFT;
  798. mask <<= SDE_REGDMA_SWTS_SHIFT;
  799. }
  800. swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
  801. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
  802. }
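/*
 * Layout note: both queues share REGDMA_TIMESTAMP_REG. The high-priority
 * queue's software timestamp occupies the low SDE_REGDMA_SWTS_MASK bits
 * and the low-priority queue's sits SDE_REGDMA_SWTS_SHIFT bits above it,
 * so the read-modify-write above preserves the other queue's field.
 * __sde_hw_rotator_get_timestamp() and sde_hw_rotator_pending_swts()
 * undo the same packing on the read side.
 */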
  803. /*
  804. * sde_hw_rotator_irq_setup - setup rotator irq
  805. * @rot: Pointer to hw rotator
  806. * return: 0 on success; negative error code otherwise
  807. */
  808. static int sde_hw_rotator_irq_setup(struct sde_hw_rotator *rot)
  809. {
  810. int rc = 0;
  811. /* return early if irq is already setup */
  812. if (rot->irq_num >= 0)
  813. return 0;
  814. rot->irq_num = platform_get_irq(rot->pdev, 0);
  815. if (rot->irq_num < 0) {
  816. rc = rot->irq_num;
  817. SDEROT_ERR("fail to get rot irq, fallback to poll %d\n", rc);
  818. } else {
  819. if (rot->mode == ROT_REGDMA_OFF)
  820. rc = devm_request_threaded_irq(&rot->pdev->dev,
  821. rot->irq_num,
  822. sde_hw_rotator_rotirq_handler,
  823. NULL, 0, "sde_rotator_r3", rot);
  824. else
  825. rc = devm_request_threaded_irq(&rot->pdev->dev,
  826. rot->irq_num,
  827. sde_hw_rotator_regdmairq_handler,
  828. NULL, 0, "sde_rotator_r3", rot);
  829. if (rc) {
  830. SDEROT_ERR("fail to request irq r:%d\n", rc);
  831. rot->irq_num = -1;
  832. } else {
  833. disable_irq(rot->irq_num);
  834. }
  835. }
  836. return rc;
  837. }
  838. /**
  839. * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
  840. * Also, clear rotator/regdma irq status.
  841. * @rot: Pointer to hw rotator
  842. */
  843. static int sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
  844. {
  845. int ret = 0;
  846. SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
  847. atomic_read(&rot->irq_enabled));
  848. ret = sde_hw_rotator_irq_setup(rot);
  849. if (ret < 0) {
  850. SDEROT_ERR("Rotator irq setup failed %d\n", ret);
  851. return ret;
  852. }
  853. if (!atomic_read(&rot->irq_enabled)) {
  854. if (rot->mode == ROT_REGDMA_OFF)
  855. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
  856. ROT_DONE_MASK);
  857. else
  858. SDE_ROTREG_WRITE(rot->mdss_base,
  859. REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
  860. enable_irq(rot->irq_num);
  861. }
  862. atomic_inc(&rot->irq_enabled);
  863. return ret;
  864. }
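/*
 * Usage note: enable/disable are reference counted. The interrupt line is
 * requested lazily on the first enable (and immediately masked via
 * disable_irq() in sde_hw_rotator_irq_setup()), so it is only unmasked
 * while at least one enable is outstanding; the final disable clears the
 * interrupt enable mask, synchronizes, and masks the line again.
 */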
  865. static int sde_hw_rotator_halt_vbif_xin_client(void)
  866. {
  867. struct sde_mdp_vbif_halt_params halt_params;
  868. int rc = 0;
  869. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  870. memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
  871. halt_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
  872. halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  873. halt_params.bit_off_mdp_clk_ctrl =
  874. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
  875. sde_mdp_halt_vbif_xin(&halt_params);
  876. rc |= halt_params.xin_timeout;
  877. memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
  878. halt_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
  879. halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  880. halt_params.bit_off_mdp_clk_ctrl =
  881. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
  882. sde_mdp_halt_vbif_xin(&halt_params);
  883. rc |= halt_params.xin_timeout;
  884. return rc;
  885. }
  886. /**
  887. * sde_hw_rotator_reset - Reset rotator hardware
  888. * @rot: pointer to hw rotator
  889. * @ctx: pointer to current rotator context during the hw hang (optional)
  890. */
  891. static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
  892. struct sde_hw_rotator_context *ctx)
  893. {
  894. struct sde_hw_rotator_context *rctx = NULL;
  895. u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
  896. REGDMA_INT_2_MASK);
  897. u32 last_ts[ROT_QUEUE_MAX] = {0,};
  898. u32 latest_ts, opmode;
  899. int elapsed_time, t;
  900. int i, j;
  901. unsigned long flags;
  902. if (!rot) {
  903. SDEROT_ERR("NULL rotator\n");
  904. return -EINVAL;
  905. }
  906. /* sw reset the hw rotator */
  907. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
  908. /* ensure write is issued to the rotator HW */
  909. wmb();
  910. usleep_range(MS_TO_US(10), MS_TO_US(20));
  911. /* force rotator into offline mode */
  912. opmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
  913. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_OP_MODE,
  914. opmode & ~(BIT(5) | BIT(4) | BIT(1) | BIT(0)));
  915. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);
  916. /* halt vbif xin client to ensure no pending transaction */
  917. sde_hw_rotator_halt_vbif_xin_client();
  918. /* if no ctx is specified, skip ctx wake up */
  919. if (!ctx)
  920. return 0;
  921. if (ctx->q_id >= ROT_QUEUE_MAX) {
  922. SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
  923. return -EINVAL;
  924. }
  925. spin_lock_irqsave(&rot->rotisr_lock, flags);
  926. /* update timestamp register with current context */
  927. last_ts[ctx->q_id] = ctx->timestamp;
  928. rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
  929. SDEROT_EVTLOG(ctx->timestamp);
  930. /*
  931. * Search for any pending rot session, and look for last timestamp
  932. * per hw queue.
  933. */
  934. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  935. latest_ts = atomic_read(&rot->timestamp[i]);
  936. latest_ts &= SDE_REGDMA_SWTS_MASK;
  937. elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
  938. last_ts[i]);
  939. for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
  940. rctx = rot->rotCtx[i][j];
  941. if (rctx && rctx != ctx) {
  942. rctx->last_regdma_isr_status = int_mask;
  943. rctx->last_regdma_timestamp = rctx->timestamp;
  944. t = sde_hw_rotator_elapsed_swts(latest_ts,
  945. rctx->timestamp);
  946. if (t < elapsed_time) {
  947. elapsed_time = t;
  948. last_ts[i] = rctx->timestamp;
  949. rot->ops.update_ts(rot, i, last_ts[i]);
  950. }
  951. SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
  952. i, j, rctx->timestamp);
  953. SDEROT_EVTLOG(i, j, rctx->timestamp,
  954. last_ts[i]);
  955. }
  956. }
  957. }
  958. /* Finally, wake up all pending rotator contexts in the queues */
  959. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  960. for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
  961. rctx = rot->rotCtx[i][j];
  962. if (rctx && rctx != ctx)
  963. wake_up_all(&rctx->regdma_waitq);
  964. }
  965. }
  966. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  967. return 0;
  968. }
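/*
 * Recovery sequence summary for sde_hw_rotator_reset(): assert the SW
 * reset override, force ROTTOP back to offline mode, release the reset,
 * halt both VBIF xin clients, then (when a context is given) re-point
 * each queue's timestamp at the oldest still-pending request and wake all
 * other queued contexts so no waiter is left blocked on a timestamp that
 * will never arrive.
 */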
  969. /**
  970. * _sde_hw_rotator_dump_status - Dump hw rotator status on error
  971. * @rot: Pointer to hw rotator
  972. */
  973. static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
  974. u32 *ubwcerr)
  975. {
  976. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  977. u32 reg = 0;
  978. SDEROT_ERR(
  979. "op_mode = %x, int_en = %x, int_status = %x\n",
  980. SDE_ROTREG_READ(rot->mdss_base,
  981. REGDMA_CSR_REGDMA_OP_MODE),
  982. SDE_ROTREG_READ(rot->mdss_base,
  983. REGDMA_CSR_REGDMA_INT_EN),
  984. SDE_ROTREG_READ(rot->mdss_base,
  985. REGDMA_CSR_REGDMA_INT_STATUS));
  986. SDEROT_ERR(
  987. "ts0/ts1 = %x/%x, q0_status = %x, q1_status = %x, block_status = %x\n",
  988. __sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_HIGH_PRIORITY),
  989. __sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_LOW_PRIORITY),
  990. SDE_ROTREG_READ(rot->mdss_base,
  991. REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
  992. SDE_ROTREG_READ(rot->mdss_base,
  993. REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
  994. SDE_ROTREG_READ(rot->mdss_base,
  995. REGDMA_CSR_REGDMA_BLOCK_STATUS));
  996. SDEROT_ERR(
  997. "invalid_cmd_offset = %x, fsm_state = %x\n",
  998. SDE_ROTREG_READ(rot->mdss_base,
  999. REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
  1000. SDE_ROTREG_READ(rot->mdss_base,
  1001. REGDMA_CSR_REGDMA_FSM_STATE));
  1002. SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
  1003. SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
  1004. SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
  1005. SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));
  1006. reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
  1007. if (ubwcerr)
  1008. *ubwcerr = reg;
  1009. SDEROT_ERR(
  1010. "UBWC decode status = %x, UBWC encode status = %x\n", reg,
  1011. SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
  1012. SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
  1013. SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
  1014. SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
  1015. SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
  1016. SDE_ROTREG_READ(rot->mdss_base,
  1017. ROT_SSPP_FETCH_SMP_WR_PLANE0),
  1018. SDE_ROTREG_READ(rot->mdss_base,
  1019. ROT_SSPP_FETCH_SMP_WR_PLANE1),
  1020. SDE_ROTREG_READ(rot->mdss_base,
  1021. ROT_SSPP_FETCH_SMP_WR_PLANE2));
  1022. SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
  1023. SDE_ROTREG_READ(rot->mdss_base,
  1024. ROT_SSPP_SMP_UNPACK_RD_PLANE0),
  1025. SDE_ROTREG_READ(rot->mdss_base,
  1026. ROT_SSPP_SMP_UNPACK_RD_PLANE1),
  1027. SDE_ROTREG_READ(rot->mdss_base,
  1028. ROT_SSPP_SMP_UNPACK_RD_PLANE2));
  1029. SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
  1030. SDE_ROTREG_READ(rot->mdss_base,
  1031. ROT_SSPP_UNPACK_LINE_COUNT),
  1032. SDE_ROTREG_READ(rot->mdss_base,
  1033. ROT_SSPP_UNPACK_BLK_COUNT),
  1034. SDE_ROTREG_READ(rot->mdss_base,
  1035. ROT_SSPP_FILL_LEVELS));
  1036. SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
  1037. SDE_ROTREG_READ(rot->mdss_base,
  1038. ROT_WB_SBUF_STATUS_PLANE0),
  1039. SDE_ROTREG_READ(rot->mdss_base,
  1040. ROT_WB_SBUF_STATUS_PLANE1),
  1041. SDE_ROTREG_READ(rot->mdss_base,
  1042. ROT_WB_SYS_CACHE_MODE));
  1043. }
  1044. /**
  1045. * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
  1046. * on the provided session_id. Each rotator session has a different session_id.
  1047. * @rot: Pointer to rotator hw
  1048. * @session_id: Identifier for rotator session
  1049. * @sequence_id: Identifier for rotation request within the session
  1050. * @q_id: Rotator queue identifier
  1051. */
  1052. static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
  1053. struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
  1054. enum sde_rot_queue_prio q_id)
  1055. {
  1056. int i;
  1057. struct sde_hw_rotator_context *ctx = NULL;
  1058. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
  1059. ctx = rot->rotCtx[q_id][i];
  1060. if (ctx && (ctx->session_id == session_id) &&
  1061. (ctx->sequence_id == sequence_id)) {
  1062. SDEROT_DBG(
  1063. "rotCtx sloti[%d][%d] ==> ctx:%pK | session-id:%d | sequence-id:%d\n",
  1064. q_id, i, ctx, ctx->session_id,
  1065. ctx->sequence_id);
  1066. return ctx;
  1067. }
  1068. }
  1069. return NULL;
  1070. }
  1071. /*
  1072. * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
  1073. * @dbgbuf: Pointer to debug buffer
  1074. * @buf: Pointer to layer buffer structure
  1075. * @data: Pointer to h/w mapped buffer structure
  1076. */
  1077. static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
  1078. struct sde_layer_buffer *buf, struct sde_mdp_data *data)
  1079. {
  1080. dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
  1081. dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
  1082. dbgbuf->vaddr = NULL;
  1083. dbgbuf->width = buf->width;
  1084. dbgbuf->height = buf->height;
  1085. if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
  1086. dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
  1087. dbgbuf->vaddr = dma_buf_vmap(dbgbuf->dmabuf);
  1088. SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
  1089. dbgbuf->vaddr, dbgbuf->buflen,
  1090. dbgbuf->width, dbgbuf->height);
  1091. }
  1092. }
  1093. /*
  1094. * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
  1095. * @dbgbuf: Pointer to debug buffer
  1096. */
  1097. static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
  1098. {
  1099. if (dbgbuf->vaddr) {
  1100. dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
  1101. dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
  1102. }
  1103. dbgbuf->vaddr = NULL;
  1104. dbgbuf->dmabuf = NULL;
  1105. dbgbuf->buflen = 0;
  1106. dbgbuf->width = 0;
  1107. dbgbuf->height = 0;
  1108. }
  1109. static void sde_hw_rotator_vbif_rt_setting(void)
  1110. {
  1111. u32 reg_high, reg_shift, reg_val, reg_val_lvl, mask, vbif_qos;
  1112. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1113. int i, j;
  1114. vbif_lock(mdata->parent_pdev);
  1115. for (i = 0; i < mdata->npriority_lvl; i++) {
  1116. for (j = 0; j < MAX_XIN; j++) {
  1117. reg_high = ((mdata->vbif_xin_id[j]
  1118. & 0x8) >> 3) * 4 + (i * 8);
  1119. reg_shift = mdata->vbif_xin_id[j] * 4;
  1120. reg_val = SDE_VBIF_READ(mdata,
  1121. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high);
  1122. reg_val_lvl = SDE_VBIF_READ(mdata,
  1123. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high);
  1124. mask = 0x7 << (mdata->vbif_xin_id[j] * 4);
  1125. vbif_qos = mdata->vbif_nrt_qos[i];
  1126. reg_val &= ~mask;
  1127. reg_val |= (vbif_qos << reg_shift) & mask;
  1128. reg_val_lvl &= ~mask;
  1129. reg_val_lvl |= (vbif_qos << reg_shift) & mask;
  1130. SDE_VBIF_WRITE(mdata,
  1131. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high,
  1132. reg_val);
  1133. SDE_VBIF_WRITE(mdata,
  1134. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high,
  1135. reg_val_lvl);
  1136. }
  1137. }
  1138. vbif_unlock(mdata->parent_pdev);
  1139. }
  1140. /*
  1141. * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
  1142. * levels, enable write gather, and optionally disable clock gating for
  1143. * debug purposes.
  1144. *
  1145. * @rot: Pointer to rotator hw
  1146. */
  1147. static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
  1148. {
  1149. u32 i, mask, vbif_qos, reg_val = 0;
  1150. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1151. /* VBIF_ROT QoS remapper setting */
  1152. switch (mdata->npriority_lvl) {
  1153. case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
  1154. for (i = 0; i < mdata->npriority_lvl; i++) {
  1155. reg_val = SDE_VBIF_READ(mdata,
  1156. MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
  1157. mask = 0x3 << (XIN_SSPP * 2);
  1158. vbif_qos = mdata->vbif_nrt_qos[i];
  1159. reg_val |= vbif_qos << (XIN_SSPP * 2);
  1160. /* ensure write is issued after the read operation */
  1161. mb();
  1162. SDE_VBIF_WRITE(mdata,
  1163. MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
  1164. reg_val);
  1165. }
  1166. break;
  1167. case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
  1168. mask = mdata->npriority_lvl - 1;
  1169. for (i = 0; i < mdata->npriority_lvl; i++) {
  1170. /* RD and WR client */
  1171. reg_val |= (mdata->vbif_nrt_qos[i] & mask)
  1172. << (XIN_SSPP * 4);
  1173. reg_val |= (mdata->vbif_nrt_qos[i] & mask)
  1174. << (XIN_WRITEBACK * 4);
  1175. SDE_VBIF_WRITE(mdata,
  1176. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
  1177. reg_val);
  1178. SDE_VBIF_WRITE(mdata,
  1179. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
  1180. reg_val);
  1181. }
  1182. break;
  1183. default:
  1184. SDEROT_DBG("invalid vbif remapper levels\n");
  1185. }
  1186. /* Enable write gather for writeback to remove write gaps, which
  1187. * may hang AXI/BIMC/SDE.
  1188. */
  1189. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
  1190. BIT(XIN_WRITEBACK));
  1191. /*
1192. * For debug purposes, disable clock gating, i.e. keep the clocks always on
  1193. */
  1194. if (mdata->clk_always_on) {
  1195. SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
  1196. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
  1197. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
  1198. 0xFFFF);
  1199. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
  1200. }
  1201. }
  1202. /*
  1203. * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
  1204. * @ctx: Pointer to rotator context
  1205. * @mask: Bit mask location of the timestamp
  1206. * @swts: Software timestamp
  1207. */
  1208. static void sde_hw_rotator_setup_timestamp_packet(
  1209. struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
  1210. {
  1211. char __iomem *wrptr;
  1212. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1213. /*
1214. * Create a dummy 1x1 packet write to a single location for timestamp
1215. * generation.
  1216. */
  1217. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
  1218. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
  1219. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1220. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1221. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
  1222. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1223. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
  1224. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
  1225. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
  1226. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
  1227. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
  1228. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
  1229. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
  1230. /*
1231. * Must clear the secure buffer setting for the SW timestamp because
1232. * the SW timestamp buffer is always allocated in a non-secure region.
  1233. */
  1234. if (ctx->is_secure) {
  1235. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
  1236. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
  1237. }
  1238. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
  1239. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
  1240. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1241. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
  1242. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
  1243. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
  1244. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
  1245. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
  1246. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
  1247. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
  1248. (ctx->rot->highest_bank & 0x3) << 8);
  1249. SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
  1250. SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
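/*
 * Read-modify-write the shared timestamp register: only this queue's
 * bits (selected by @mask) are updated with the new SW timestamp
 * @swts, after the preceding rotation job has finished.
 */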
  1251. SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
  1252. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
  1253. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1254. }
  1255. /*
  1256. * sde_hw_rotator_cdp_configs - configures the CDP registers
  1257. * @ctx: Pointer to rotator context
  1258. * @params: Pointer to parameters needed for CDP configs
  1259. */
  1260. static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
  1261. struct sde_rot_cdp_params *params)
  1262. {
  1263. int reg_val;
  1264. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1265. if (!params->enable) {
  1266. SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
  1267. goto end;
  1268. }
  1269. reg_val = BIT(0); /* enable cdp */
  1270. if (sde_mdp_is_ubwc_format(params->fmt))
  1271. reg_val |= BIT(1); /* enable UBWC meta cdp */
  1272. if (sde_mdp_is_ubwc_format(params->fmt)
  1273. || sde_mdp_is_tilea4x_format(params->fmt)
  1274. || sde_mdp_is_tilea5x_format(params->fmt))
  1275. reg_val |= BIT(2); /* enable tile amortize */
  1276. reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
  1277. SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
  1278. end:
  1279. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1280. }
  1281. /*
  1282. * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
  1283. * for the WRITEBACK rotator for inline and offline rotation.
  1284. *
  1285. * @ctx: Pointer to rotator context
  1286. */
  1287. static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
  1288. {
  1289. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1290. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1291. /* Offline rotation setting */
  1292. if (!ctx->sbuf_mode) {
  1293. /* QOS LUT WR setting */
  1294. if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
  1295. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
  1296. mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
  1297. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
  1298. mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
  1299. }
  1300. /* Danger LUT WR setting */
  1301. if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
  1302. SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
  1303. mdata->lut_cfg[SDE_ROT_WR].danger_lut);
  1304. /* Safe LUT WR setting */
  1305. if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
  1306. SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
  1307. mdata->lut_cfg[SDE_ROT_WR].safe_lut);
  1308. /* Inline rotation setting */
  1309. } else {
  1310. /* QOS LUT WR setting */
  1311. if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
  1312. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
  1313. mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
  1314. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
  1315. mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
  1316. }
  1317. /* Danger LUT WR setting */
  1318. if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
  1319. mdata->sde_inline_qos_map))
  1320. SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
  1321. mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
  1322. /* Safe LUT WR setting */
  1323. if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
  1324. mdata->sde_inline_qos_map))
  1325. SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
  1326. mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
  1327. }
  1328. /* Update command queue write ptr */
  1329. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1330. }
  1331. /*
  1332. * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
  1333. * for the SSPP rotator for inline and offline rotation.
  1334. *
  1335. * @ctx: Pointer to rotator context
  1336. */
  1337. static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
  1338. {
  1339. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1340. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1341. /* Offline rotation setting */
  1342. if (!ctx->sbuf_mode) {
  1343. /* QOS LUT RD setting */
  1344. if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
  1345. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
  1346. mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
  1347. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
  1348. mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
  1349. }
  1350. /* Danger LUT RD setting */
  1351. if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
  1352. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
  1353. mdata->lut_cfg[SDE_ROT_RD].danger_lut);
  1354. /* Safe LUT RD setting */
  1355. if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
  1356. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
  1357. mdata->lut_cfg[SDE_ROT_RD].safe_lut);
  1358. /* inline rotation setting */
  1359. } else {
  1360. /* QOS LUT RD setting */
  1361. if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
  1362. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
  1363. mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
  1364. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
  1365. mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
  1366. }
  1367. /* Danger LUT RD setting */
  1368. if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
  1369. mdata->sde_inline_qos_map))
  1370. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
  1371. mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
  1372. /* Safe LUT RD setting */
  1373. if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
  1374. mdata->sde_inline_qos_map))
  1375. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
  1376. mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
  1377. }
  1378. /* Update command queue write ptr */
  1379. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1380. }
  1381. static void sde_hw_rotator_setup_fetchengine_helper(
  1382. struct sde_hw_rot_sspp_cfg *cfg,
  1383. struct sde_rot_data_type *mdata,
  1384. struct sde_hw_rotator_context *ctx, char __iomem *wrptr,
  1385. u32 flags, u32 *width, u32 *height)
  1386. {
  1387. int i;
  1388. /*
  1389. * initialize start control trigger selection first
  1390. */
  1391. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
  1392. if (ctx->sbuf_mode)
  1393. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
  1394. ctx->start_ctrl);
  1395. else
  1396. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
  1397. }
  1398. /* source image setup */
  1399. if ((flags & SDE_ROT_FLAG_DEINTERLACE)
  1400. && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
  1401. for (i = 0; i < cfg->src_plane.num_planes; i++)
  1402. cfg->src_plane.ystride[i] *= 2;
  1403. *width *= 2;
  1404. *height /= 2;
  1405. }
  1406. }
  1407. /*
  1408. * sde_hw_rotator_setup_fetchengine - setup fetch engine
  1409. * @ctx: Pointer to rotator context
  1410. * @queue_id: Priority queue identifier
  1411. * @cfg: Fetch configuration
  1412. * @danger_lut: real-time QoS LUT for danger setting (not used)
  1413. * @safe_lut: real-time QoS LUT for safe setting (not used)
  1414. * @dnsc_factor_w: downscale factor for width
  1415. * @dnsc_factor_h: downscale factor for height
  1416. * @flags: Control flag
  1417. */
  1418. static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
  1419. enum sde_rot_queue_prio queue_id,
  1420. struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
  1421. u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
  1422. {
  1423. struct sde_hw_rotator *rot = ctx->rot;
  1424. struct sde_mdp_format_params *fmt;
  1425. struct sde_mdp_data *data;
  1426. struct sde_rot_cdp_params cdp_params = {0};
  1427. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1428. char __iomem *wrptr;
  1429. u32 opmode = 0;
  1430. u32 chroma_samp = 0;
  1431. u32 src_format = 0;
  1432. u32 unpack = 0;
  1433. u32 width = cfg->img_width;
  1434. u32 height = cfg->img_height;
  1435. u32 fetch_blocksize = 0;
  1436. int i;
  1437. if (ctx->rot->mode == ROT_REGDMA_ON) {
  1438. if (rot->irq_num >= 0)
  1439. SDE_ROTREG_WRITE(rot->mdss_base,
  1440. REGDMA_CSR_REGDMA_INT_EN,
  1441. REGDMA_INT_MASK);
  1442. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
  1443. REGDMA_EN);
  1444. }
  1445. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1446. sde_hw_rotator_setup_fetchengine_helper(cfg, mdata, ctx, wrptr,
  1447. flags, &width, &height);
  1448. /*
  1449. * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
  1450. */
  1451. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
  1452. /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
  1453. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1454. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1455. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
  1456. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1457. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1458. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1459. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1460. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1461. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1462. /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
  1463. data = cfg->data;
  1464. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1465. SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
  1466. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
  1467. (cfg->src_plane.ystride[1] << 16));
  1468. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
  1469. (cfg->src_plane.ystride[3] << 16));
  1470. /* UNUSED, write 0 */
  1471. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1472. /* setup source format */
  1473. fmt = cfg->fmt;
  1474. chroma_samp = fmt->chroma_sample;
  1475. if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
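/*
 * A source that was already rotated 90 degrees has its chroma
 * siting transposed, so swap H2V1 and H1V2 sub-sampling.
 */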
  1476. if (chroma_samp == SDE_MDP_CHROMA_H2V1)
  1477. chroma_samp = SDE_MDP_CHROMA_H1V2;
  1478. else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
  1479. chroma_samp = SDE_MDP_CHROMA_H2V1;
  1480. }
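/*
 * Pack the SRC_FORMAT register: chroma sub-sampling, fetch plane
 * mode and the per-component bit depths.
 */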
  1481. src_format = (chroma_samp << 23) |
  1482. (fmt->fetch_planes << 19) |
  1483. (fmt->bits[C3_ALPHA] << 6) |
  1484. (fmt->bits[C2_R_Cr] << 4) |
  1485. (fmt->bits[C1_B_Cb] << 2) |
  1486. (fmt->bits[C0_G_Y] << 0);
  1487. if (fmt->alpha_enable &&
  1488. (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
  1489. src_format |= BIT(8); /* SRCC3_EN */
  1490. src_format |= ((fmt->unpack_count - 1) << 12) |
  1491. (fmt->unpack_tight << 17) |
  1492. (fmt->unpack_align_msb << 18) |
  1493. ((fmt->bpp - 1) << 9) |
  1494. ((fmt->frame_format & 3) << 30);
  1495. if (flags & SDE_ROT_FLAG_ROT_90)
  1496. src_format |= BIT(11); /* ROT90 */
  1497. if (sde_mdp_is_ubwc_format(fmt))
  1498. opmode |= BIT(0); /* BWC_DEC_EN */
  1499. /* if this is YUV pixel format, enable CSC */
  1500. if (sde_mdp_is_yuv_format(fmt))
  1501. src_format |= BIT(15); /* SRC_COLOR_SPACE */
  1502. if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
  1503. src_format |= BIT(14); /* UNPACK_DX_FORMAT */
  1504. if (rot->solid_fill)
  1505. src_format |= BIT(22); /* SOLID_FILL */
  1506. /* SRC_FORMAT */
  1507. SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
  1508. /* setup source unpack pattern */
  1509. unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
  1510. (fmt->element[1] << 8) | (fmt->element[0] << 0);
  1511. /* SRC_UNPACK_PATTERN */
  1512. SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
  1513. /* setup source op mode */
  1514. if (flags & SDE_ROT_FLAG_FLIP_LR)
  1515. opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
  1516. if (flags & SDE_ROT_FLAG_FLIP_UD)
  1517. opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
  1518. opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
  1519. /* SRC_OP_MODE */
  1520. SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
  1521. /* setup source fetch config, TP10 uses different block size */
  1522. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
  1523. (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
  1524. if (sde_mdp_is_tp10_format(fmt))
  1525. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
  1526. else
  1527. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
  1528. } else {
  1529. if (sde_mdp_is_tp10_format(fmt))
  1530. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
  1531. else
  1532. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
  1533. }
  1534. if (rot->solid_fill)
  1535. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
  1536. rot->constant_color);
  1537. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
  1538. fetch_blocksize |
  1539. SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
  1540. ((rot->highest_bank & 0x3) << 18));
  1541. if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
  1542. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL,
  1543. ((ctx->rot->ubwc_malsize & 0x3) << 8) |
  1544. ((ctx->rot->highest_bank & 0x3) << 4) |
  1545. ((ctx->rot->ubwc_swizzle & 0x1) << 0));
  1546. else if (test_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map) ||
  1547. test_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map))
  1548. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(30));
  1549. /* setup source buffer plane security status */
  1550. if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
  1551. SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
  1552. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
  1553. ctx->is_secure = true;
  1554. } else {
  1555. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
  1556. ctx->is_secure = false;
  1557. }
  1558. /* Update command queue write ptr */
  1559. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1560. /* CDP register RD setting */
  1561. cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
  1562. mdata->enable_cdp[SDE_ROT_RD] : false;
  1563. cdp_params.fmt = fmt;
  1564. cdp_params.offset = ROT_SSPP_CDP_CNTL;
  1565. sde_hw_rotator_cdp_configs(ctx, &cdp_params);
  1566. /* QOS LUT/ Danger LUT/ Safe Lut WR setting */
  1567. sde_hw_rotator_setup_qos_lut_rd(ctx);
  1568. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1569. /*
1570. * Determine if traffic shaping is required. Only enable traffic
1571. * shaping for 4k content at 30fps or below. The actual traffic shaping
  1572. * bandwidth calculation is done in output setup.
  1573. */
  1574. if (((!ctx->sbuf_mode)
  1575. && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
  1576. && (cfg->fps <= 30)) {
  1577. SDEROT_DBG("Enable Traffic Shaper\n");
  1578. ctx->is_traffic_shaping = true;
  1579. } else {
  1580. SDEROT_DBG("Disable Traffic Shaper\n");
  1581. ctx->is_traffic_shaping = false;
  1582. }
  1583. /* Update command queue write ptr */
  1584. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1585. }
  1586. /*
  1587. * sde_hw_rotator_setup_wbengine - setup writeback engine
  1588. * @ctx: Pointer to rotator context
  1589. * @queue_id: Priority queue identifier
  1590. * @cfg: Writeback configuration
  1591. * @flags: Control flag
  1592. */
  1593. static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
  1594. enum sde_rot_queue_prio queue_id,
  1595. struct sde_hw_rot_wb_cfg *cfg,
  1596. u32 flags)
  1597. {
  1598. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1599. struct sde_mdp_format_params *fmt;
  1600. struct sde_rot_cdp_params cdp_params = {0};
  1601. char __iomem *wrptr;
  1602. u32 pack = 0;
  1603. u32 dst_format = 0;
  1604. u32 no_partial_writes = 0;
  1605. int i;
  1606. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1607. fmt = cfg->fmt;
  1608. /* setup WB DST format */
  1609. dst_format |= (fmt->chroma_sample << 23) |
  1610. (fmt->fetch_planes << 19) |
  1611. (fmt->bits[C3_ALPHA] << 6) |
  1612. (fmt->bits[C2_R_Cr] << 4) |
  1613. (fmt->bits[C1_B_Cb] << 2) |
  1614. (fmt->bits[C0_G_Y] << 0);
  1615. /* alpha control */
  1616. if (fmt->alpha_enable || (!fmt->is_yuv && (fmt->unpack_count == 4))) {
  1617. dst_format |= BIT(8);
  1618. if (!fmt->alpha_enable) {
  1619. dst_format |= BIT(14);
  1620. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
  1621. }
  1622. }
  1623. dst_format |= ((fmt->unpack_count - 1) << 12) |
  1624. (fmt->unpack_tight << 17) |
  1625. (fmt->unpack_align_msb << 18) |
  1626. ((fmt->bpp - 1) << 9) |
  1627. ((fmt->frame_format & 3) << 30);
  1628. if (sde_mdp_is_yuv_format(fmt))
  1629. dst_format |= BIT(15);
  1630. if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
  1631. dst_format |= BIT(21); /* PACK_DX_FORMAT */
  1632. /*
  1633. * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
  1634. */
  1635. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
  1636. /* DST_FORMAT */
  1637. SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
  1638. /* DST_OP_MODE */
  1639. if (sde_mdp_is_ubwc_format(fmt))
  1640. SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
  1641. else
  1642. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1643. /* DST_PACK_PATTERN */
  1644. pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
  1645. (fmt->element[1] << 8) | (fmt->element[0] << 0);
  1646. SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
  1647. /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
  1648. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1649. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
  1650. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
  1651. (cfg->dst_plane.ystride[1] << 16));
  1652. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
  1653. (cfg->dst_plane.ystride[3] << 16));
  1654. /* setup WB out image size and ROI */
  1655. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
  1656. cfg->img_width | (cfg->img_height << 16));
  1657. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
  1658. cfg->dst_rect->w | (cfg->dst_rect->h << 16));
  1659. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
  1660. cfg->dst_rect->x | (cfg->dst_rect->y << 16));
  1661. if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
  1662. SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
  1663. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
  1664. else
  1665. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
  1666. /*
  1667. * setup Downscale factor
  1668. */
  1669. SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
  1670. cfg->v_downscale_factor |
  1671. (cfg->h_downscale_factor << 16));
  1672. /* partial write check */
  1673. if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
  1674. no_partial_writes = BIT(10);
  1675. /*
1676. * For simplicity, only disable partial writes if
1677. * the ROI spans the entire width of the output
1678. * image and every plane stride is properly
1679. * aligned.
  1680. *
  1681. * This avoids having to determine the memory access
  1682. * alignment of the actual horizontal ROI on a per
  1683. * color format basis.
  1684. */
  1685. if (sde_mdp_is_ubwc_format(fmt)) {
  1686. no_partial_writes = 0x0;
  1687. } else if (cfg->dst_rect->x ||
  1688. cfg->dst_rect->w != cfg->img_width) {
  1689. no_partial_writes = 0x0;
  1690. } else {
  1691. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1692. if (cfg->dst_plane.ystride[i] &
  1693. PARTIAL_WRITE_ALIGNMENT)
  1694. no_partial_writes = 0x0;
  1695. }
  1696. }
  1697. /* write config setup for bank configuration */
  1698. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
  1699. (ctx->rot->highest_bank & 0x3) << 8);
  1700. if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
  1701. SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
  1702. ((ctx->rot->ubwc_malsize & 0x3) << 8) |
  1703. ((ctx->rot->highest_bank & 0x3) << 4) |
  1704. ((ctx->rot->ubwc_swizzle & 0x1) << 0));
  1705. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
  1706. SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
  1707. ctx->sys_cache_mode);
  1708. SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
  1709. (flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
  1710. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1711. /* CDP register WR setting */
  1712. cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
  1713. mdata->enable_cdp[SDE_ROT_WR] : false;
  1714. cdp_params.fmt = fmt;
  1715. cdp_params.offset = ROT_WB_CDP_CNTL;
  1716. sde_hw_rotator_cdp_configs(ctx, &cdp_params);
  1717. /* QOS LUT/ Danger LUT/ Safe LUT WR setting */
  1718. sde_hw_rotator_setup_qos_lut_wr(ctx);
  1719. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1720. /* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
  1721. if (ctx->is_traffic_shaping || cfg->prefill_bw) {
  1722. u32 bw;
  1723. /*
1724. * Target to finish in 12ms, and we need to set the number of bytes
1725. * per clock tick for traffic shaping.
1726. * Each clock tick runs at 19.2MHz, so we need to know the total number
1727. * of clock ticks in 12ms, i.e. 12ms/(1/19.2MHz) ==> 230400.
1728. * Finally, calculate the byte count per clock tick based on
  1729. * resolution, bpp and compression ratio.
  1730. */
  1731. bw = cfg->dst_rect->w * cfg->dst_rect->h;
  1732. if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
  1733. bw = (bw * 3) / 2;
  1734. else
  1735. bw *= fmt->bpp;
  1736. bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
  1737. /* use prefill bandwidth instead if specified */
  1738. if (cfg->prefill_bw)
  1739. bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
  1740. TRAFFIC_SHAPE_VSYNC_CLK);
  1741. if (bw > 0xFF)
  1742. bw = 0xFF;
  1743. else if (bw == 0)
  1744. bw = 1;
  1745. SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
  1746. BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
  1747. SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
  1748. } else {
  1749. SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
  1750. SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
  1751. }
  1752. /* Update command queue write ptr */
  1753. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1754. }
  1755. /*
  1756. * sde_hw_rotator_start_no_regdma - start non-regdma operation
  1757. * @ctx: Pointer to rotator context
  1758. * @queue_id: Priority queue identifier
  1759. */
  1760. static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
  1761. enum sde_rot_queue_prio queue_id)
  1762. {
  1763. struct sde_hw_rotator *rot = ctx->rot;
  1764. char __iomem *wrptr;
  1765. char __iomem *mem_rdptr;
  1766. char __iomem *addr;
  1767. u32 mask;
  1768. u32 cmd0, cmd1, cmd2;
  1769. u32 blksize;
  1770. /*
1771. * When regdma is not in use, the regdma segment is just normal
1772. * DRAM, not iomem.
  1773. */
  1774. mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
  1775. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1776. if (!sde_hw_rotator_enable_irq(rot)) {
  1777. SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
  1778. SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
  1779. reinit_completion(&ctx->rot_comp);
  1780. }
  1781. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
  1782. /* Update command queue write ptr */
  1783. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1784. SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
1785. /* Write the whole command stream to the rotator blocks */
1786. /* The rotator starts right away once the command stream finishes writing */
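/*
 * In non-regdma mode the command stream lives in normal memory;
 * parse each REGDMA opcode below and replay it with direct register
 * writes to the rotator block.
 */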
  1787. while (mem_rdptr < wrptr) {
  1788. u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
  1789. switch (op) {
  1790. case REGDMA_OP_NOP:
  1791. SDEROT_DBG("NOP\n");
  1792. mem_rdptr += sizeof(u32);
  1793. break;
  1794. case REGDMA_OP_REGWRITE:
  1795. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1796. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1797. SDEROT_DBG("REGW %6.6x %8.8x\n",
  1798. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1799. cmd1);
  1800. addr = rot->mdss_base +
  1801. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1802. writel_relaxed(cmd1, addr);
  1803. break;
  1804. case REGDMA_OP_REGMODIFY:
  1805. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1806. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1807. SDE_REGDMA_READ(mem_rdptr, cmd2);
  1808. SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
  1809. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1810. cmd1, cmd2);
  1811. addr = rot->mdss_base +
  1812. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1813. mask = cmd1;
  1814. writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
  1815. addr);
  1816. break;
  1817. case REGDMA_OP_BLKWRITE_SINGLE:
  1818. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1819. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1820. SDEROT_DBG("BLKWS %6.6x %6.6x\n",
  1821. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1822. cmd1);
  1823. addr = rot->mdss_base +
  1824. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1825. blksize = cmd1;
  1826. while (blksize--) {
  1827. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1828. SDEROT_DBG("DATA %8.8x\n", cmd0);
  1829. writel_relaxed(cmd0, addr);
  1830. }
  1831. break;
  1832. case REGDMA_OP_BLKWRITE_INC:
  1833. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1834. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1835. SDEROT_DBG("BLKWI %6.6x %6.6x\n",
  1836. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1837. cmd1);
  1838. addr = rot->mdss_base +
  1839. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1840. blksize = cmd1;
  1841. while (blksize--) {
  1842. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1843. SDEROT_DBG("DATA %8.8x\n", cmd0);
  1844. writel_relaxed(cmd0, addr);
  1845. addr += 4;
  1846. }
  1847. break;
  1848. default:
1849. /* Unsupported OP mode;
1850. * skip the data for an unrecognized OP code
  1851. */
  1852. SDEROT_DBG("UNDEFINED\n");
  1853. mem_rdptr += sizeof(u32);
  1854. break;
  1855. }
  1856. }
  1857. SDEROT_DBG("END %d\n", ctx->timestamp);
  1858. return ctx->timestamp;
  1859. }
  1860. /*
  1861. * sde_hw_rotator_start_regdma - start regdma operation
  1862. * @ctx: Pointer to rotator context
  1863. * @queue_id: Priority queue identifier
  1864. */
  1865. static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
  1866. enum sde_rot_queue_prio queue_id)
  1867. {
  1868. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1869. struct sde_hw_rotator *rot = ctx->rot;
  1870. char __iomem *wrptr;
  1871. u32 regdmaSlot;
  1872. u32 offset;
  1873. u32 length;
  1874. u32 ts_length;
  1875. u32 enableInt;
  1876. u32 swts = 0;
  1877. u32 mask = 0;
  1878. u32 trig_sel;
  1879. bool int_trigger = false;
  1880. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1881. /* Enable HW timestamp if supported in rotator */
  1882. if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
  1883. SDE_REGDMA_MODIFY(wrptr, ROTTOP_ROT_CNTR_CTRL,
  1884. ~BIT(queue_id), BIT(queue_id));
  1885. int_trigger = true;
  1886. } else if (ctx->sbuf_mode) {
  1887. int_trigger = true;
  1888. }
  1889. /*
  1890. * Last ROT command must be ROT_START before REGDMA start
  1891. */
  1892. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
  1893. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1894. /*
  1895. * Start REGDMA with command offset and size
  1896. */
  1897. regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
  1898. length = (wrptr - ctx->regdma_base) / 4;
  1899. offset = (ctx->regdma_base - (rot->mdss_base +
  1900. REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
  1901. enableInt = ((ctx->timestamp & 1) + 1) << 30;
  1902. trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
  1903. REGDMA_CMD_TRIG_SEL_SW_START;
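/*
 * The QUEUE_n_SUBMIT word encodes: interrupt select in bits [31:30]
 * (alternating with the timestamp parity), the trigger select, the
 * command length in dwords starting at bit 14, and the command RAM
 * offset in the low bits.
 */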
  1904. SDEROT_DBG(
  1905. "regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
  1906. queue_id, regdmaSlot, enableInt, length, offset,
  1907. ctx->timestamp);
  1908. /* ensure the command packet is issued before the submit command */
  1909. wmb();
  1910. /* REGDMA submission for current context */
  1911. if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
  1912. SDE_ROTREG_WRITE(rot->mdss_base,
  1913. REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
  1914. (int_trigger ? enableInt : 0) | trig_sel |
  1915. ((length & 0x3ff) << 14) | offset);
  1916. swts = ctx->timestamp;
  1917. mask = ~SDE_REGDMA_SWTS_MASK;
  1918. } else {
  1919. SDE_ROTREG_WRITE(rot->mdss_base,
  1920. REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
  1921. (int_trigger ? enableInt : 0) | trig_sel |
  1922. ((length & 0x3ff) << 14) | offset);
  1923. swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
  1924. mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
  1925. }
  1926. SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);
  1927. /* sw timestamp update can only be used in offline multi-context mode */
  1928. if (!int_trigger) {
  1929. /* Write timestamp after previous rotator job finished */
  1930. sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
  1931. offset += length;
  1932. ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
  1933. ts_length /= sizeof(u32);
  1934. WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
1935. /* ensure the command packet is issued before the submit command */
  1936. wmb();
  1937. SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);
  1938. if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
  1939. SDE_ROTREG_WRITE(rot->mdss_base,
  1940. REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
  1941. enableInt | (ts_length << 14) | offset);
  1942. } else {
  1943. SDE_ROTREG_WRITE(rot->mdss_base,
  1944. REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
  1945. enableInt | (ts_length << 14) | offset);
  1946. }
  1947. }
  1948. /* Update command queue write ptr */
  1949. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1950. return ctx->timestamp;
  1951. }
  1952. /*
  1953. * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
  1954. * @ctx: Pointer to rotator context
  1955. * @queue_id: Priority queue identifier
  1956. * @flags: Option flag
  1957. */
  1958. static u32 sde_hw_rotator_wait_done_no_regdma(
  1959. struct sde_hw_rotator_context *ctx,
  1960. enum sde_rot_queue_prio queue_id, u32 flag)
  1961. {
  1962. struct sde_hw_rotator *rot = ctx->rot;
  1963. int rc = 0;
  1964. u32 sts = 0;
  1965. u32 status;
  1966. unsigned long flags;
  1967. if (rot->irq_num >= 0) {
  1968. SDEROT_DBG("Wait for Rotator completion\n");
  1969. rc = wait_for_completion_timeout(&ctx->rot_comp,
  1970. ctx->sbuf_mode ?
  1971. msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
  1972. msecs_to_jiffies(rot->koff_timeout));
  1973. spin_lock_irqsave(&rot->rotisr_lock, flags);
  1974. status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  1975. if (rc == 0) {
  1976. /*
  1977. * Timeout, there might be error,
  1978. * or rotator still busy
  1979. */
  1980. if (status & ROT_BUSY_BIT)
  1981. SDEROT_ERR(
  1982. "Timeout waiting for rotator done\n");
  1983. else if (status & ROT_ERROR_BIT)
  1984. SDEROT_ERR(
  1985. "Rotator report error status\n");
  1986. else
  1987. SDEROT_WARN(
  1988. "Timeout waiting, but rotator job is done!!\n");
  1989. sde_hw_rotator_disable_irq(rot);
  1990. }
  1991. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  1992. } else {
  1993. int cnt = 200;
  1994. do {
  1995. udelay(500);
  1996. status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  1997. cnt--;
  1998. } while ((cnt > 0) && (status & ROT_BUSY_BIT)
  1999. && ((status & ROT_ERROR_BIT) == 0));
  2000. if (status & ROT_ERROR_BIT)
  2001. SDEROT_ERR("Rotator error\n");
  2002. else if (status & ROT_BUSY_BIT)
  2003. SDEROT_ERR("Rotator busy\n");
  2004. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
  2005. ROT_DONE_CLEAR);
  2006. }
  2007. sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
  2008. return sts;
  2009. }
  2010. /*
  2011. * sde_hw_rotator_wait_done_regdma - wait for regdma completion
  2012. * @ctx: Pointer to rotator context
  2013. * @queue_id: Priority queue identifier
  2014. * @flags: Option flag
  2015. */
  2016. static u32 sde_hw_rotator_wait_done_regdma(
  2017. struct sde_hw_rotator_context *ctx,
  2018. enum sde_rot_queue_prio queue_id, u32 flag)
  2019. {
  2020. struct sde_hw_rotator *rot = ctx->rot;
  2021. int rc = 0;
  2022. bool timeout = false;
  2023. bool pending;
  2024. bool abort;
  2025. u32 status;
  2026. u32 last_isr;
  2027. u32 last_ts;
  2028. u32 int_id;
  2029. u32 swts;
  2030. u32 sts = 0;
  2031. u32 ubwcerr;
  2032. u32 hwts[ROT_QUEUE_MAX];
  2033. unsigned long flags;
  2034. if (rot->irq_num >= 0) {
  2035. SDEROT_DBG("Wait for REGDMA completion, ctx:%pK, ts:%X\n",
  2036. ctx, ctx->timestamp);
  2037. rc = wait_event_timeout(ctx->regdma_waitq,
  2038. !rot->ops.get_pending_ts(rot, ctx, &swts),
  2039. ctx->sbuf_mode ?
  2040. msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
  2041. msecs_to_jiffies(rot->koff_timeout));
  2042. ATRACE_INT("sde_rot_done", 0);
  2043. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2044. last_isr = ctx->last_regdma_isr_status;
  2045. last_ts = ctx->last_regdma_timestamp;
  2046. abort = ctx->abort;
  2047. status = last_isr & REGDMA_INT_MASK;
  2048. int_id = last_ts & 1;
  2049. SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
  2050. status, int_id, last_ts);
  2051. if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
  2052. timeout = true;
  2053. pending = rot->ops.get_pending_ts(rot, ctx, &swts);
  2054. /* cache ubwcerr and hw timestamps while locked */
  2055. ubwcerr = SDE_ROTREG_READ(rot->mdss_base,
  2056. ROT_SSPP_UBWC_ERROR_STATUS);
  2057. hwts[ROT_QUEUE_HIGH_PRIORITY] =
  2058. __sde_hw_rotator_get_timestamp(rot,
  2059. ROT_QUEUE_HIGH_PRIORITY);
  2060. hwts[ROT_QUEUE_LOW_PRIORITY] =
  2061. __sde_hw_rotator_get_timestamp(rot,
  2062. ROT_QUEUE_LOW_PRIORITY);
  2063. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2064. if (ubwcerr || abort ||
  2065. sde_hw_rotator_halt_vbif_xin_client()) {
  2066. /*
  2067. * Perform recovery for ROT SSPP UBWC decode
  2068. * error.
  2069. * - SW reset rotator hw block
2070. * - reset TS logic so all pending rotations
2071. * in the hw queue get signalled as done
  2072. */
  2073. if (!sde_hw_rotator_reset(rot, ctx))
  2074. status = REGDMA_INCOMPLETE_CMD;
  2075. else
  2076. status = ROT_ERROR_BIT;
  2077. } else {
  2078. status = ROT_ERROR_BIT;
  2079. }
  2080. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2081. } else {
  2082. if (rc == 1)
  2083. SDEROT_WARN(
  2084. "REGDMA done but no irq, ts:0x%X/0x%X\n",
  2085. ctx->timestamp, swts);
  2086. status = 0;
  2087. }
  2088. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2089. /* dump rot status after releasing lock if timeout occurred */
  2090. if (timeout) {
  2091. SDEROT_ERR(
  2092. "TIMEOUT, ts:0x%X/0x%X, pending:%d, abort:%d\n",
  2093. ctx->timestamp, swts, pending, abort);
  2094. SDEROT_ERR(
  2095. "Cached: HW ts0/ts1 = %x/%x, ubwcerr = %x\n",
  2096. hwts[ROT_QUEUE_HIGH_PRIORITY],
  2097. hwts[ROT_QUEUE_LOW_PRIORITY], ubwcerr);
  2098. if (status & REGDMA_WATCHDOG_INT)
  2099. SDEROT_ERR("REGDMA watchdog interrupt\n");
  2100. else if (status & REGDMA_INVALID_DESCRIPTOR)
  2101. SDEROT_ERR("REGDMA invalid descriptor\n");
  2102. else if (status & REGDMA_INCOMPLETE_CMD)
  2103. SDEROT_ERR("REGDMA incomplete command\n");
  2104. else if (status & REGDMA_INVALID_CMD)
  2105. SDEROT_ERR("REGDMA invalid command\n");
  2106. _sde_hw_rotator_dump_status(rot, &ubwcerr);
  2107. }
  2108. } else {
  2109. int cnt = 200;
  2110. bool pending;
  2111. do {
  2112. udelay(500);
  2113. last_isr = SDE_ROTREG_READ(rot->mdss_base,
  2114. REGDMA_CSR_REGDMA_INT_STATUS);
  2115. pending = rot->ops.get_pending_ts(rot, ctx, &swts);
  2116. cnt--;
  2117. } while ((cnt > 0) && pending &&
  2118. ((last_isr & REGDMA_INT_ERR_MASK) == 0));
  2119. if (last_isr & REGDMA_INT_ERR_MASK) {
  2120. SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
  2121. ctx->timestamp, swts, last_isr);
  2122. _sde_hw_rotator_dump_status(rot, NULL);
  2123. status = ROT_ERROR_BIT;
  2124. } else if (pending) {
  2125. SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
  2126. ctx->timestamp, swts, last_isr);
  2127. _sde_hw_rotator_dump_status(rot, NULL);
  2128. status = ROT_ERROR_BIT;
  2129. } else {
  2130. status = 0;
  2131. }
  2132. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
  2133. last_isr);
  2134. }
  2135. sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
  2136. if (status & ROT_ERROR_BIT)
  2137. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2138. "vbif_dbg_bus", "panic");
  2139. return sts;
  2140. }
  2141. /*
  2142. * setup_rotator_ops - setup callback functions for the low-level HAL
  2143. * @ops: Pointer to low-level ops callback
  2144. * @mode: Operation mode (non-regdma or regdma)
  2145. * @use_hwts: HW timestamp support mode
  2146. */
  2147. static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
  2148. enum sde_rotator_regdma_mode mode,
  2149. bool use_hwts)
  2150. {
  2151. ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
  2152. ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
  2153. if (mode == ROT_REGDMA_ON) {
  2154. ops->start_rotator = sde_hw_rotator_start_regdma;
  2155. ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
  2156. } else {
  2157. ops->start_rotator = sde_hw_rotator_start_no_regdma;
  2158. ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
  2159. }
  2160. if (use_hwts) {
  2161. ops->get_pending_ts = sde_hw_rotator_pending_hwts;
  2162. ops->update_ts = sde_hw_rotator_update_hwts;
  2163. } else {
  2164. ops->get_pending_ts = sde_hw_rotator_pending_swts;
  2165. ops->update_ts = sde_hw_rotator_update_swts;
  2166. }
  2167. }
  2168. /*
  2169. * sde_hw_rotator_swts_create - create software timestamp buffer
  2170. * @rot: Pointer to rotator hw
  2171. *
2172. * This buffer is used by regdma to keep track of the last completed command.
  2173. */
  2174. static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
  2175. {
  2176. int rc = 0;
  2177. struct sde_mdp_img_data *data;
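/*
 * One 32-bit timestamp slot per regdma context on each of the two
 * priority queues, rounded up to at least one 4K page.
 */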
  2178. u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
  2179. if (bufsize < SZ_4K)
  2180. bufsize = SZ_4K;
  2181. data = &rot->swts_buf;
  2182. data->len = bufsize;
  2183. data->srcp_dma_buf = sde_rot_get_dmabuf(data);
  2184. if (!data->srcp_dma_buf) {
  2185. SDEROT_ERR("Fail dmabuf create\n");
  2186. return -ENOMEM;
  2187. }
  2188. sde_smmu_ctrl(1);
  2189. data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
  2190. &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
  2191. if (IS_ERR_OR_NULL(data->srcp_attachment)) {
  2192. SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
  2193. rc = -ENOMEM;
  2194. goto err_put;
  2195. }
  2196. data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
  2197. DMA_BIDIRECTIONAL);
  2198. if (IS_ERR_OR_NULL(data->srcp_table)) {
  2199. SDEROT_ERR("dma_buf_map_attachment error\n");
  2200. rc = -ENOMEM;
  2201. goto err_detach;
  2202. }
  2203. rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
  2204. SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
  2205. &data->len, DMA_BIDIRECTIONAL);
  2206. if (rc < 0) {
  2207. SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
  2208. goto err_unmap;
  2209. }
  2210. data->mapped = true;
  2211. SDEROT_DBG("swts buffer mapped: %pad/%lx va:%pK\n", &data->addr,
  2212. data->len, rot->swts_buffer);
  2213. sde_smmu_ctrl(0);
  2214. return rc;
  2215. err_unmap:
  2216. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2217. DMA_FROM_DEVICE);
  2218. err_detach:
  2219. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2220. err_put:
  2221. data->srcp_dma_buf = NULL;
  2222. sde_smmu_ctrl(0);
  2223. return rc;
  2224. }
  2225. /*
  2226. * sde_hw_rotator_swts_destroy - destroy software timestamp buffer
  2227. * @rot: Pointer to rotator hw
  2228. */
  2229. static void sde_hw_rotator_swts_destroy(struct sde_hw_rotator *rot)
  2230. {
  2231. struct sde_mdp_img_data *data;
  2232. data = &rot->swts_buf;
  2233. sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
  2234. DMA_FROM_DEVICE, data->srcp_dma_buf);
  2235. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2236. DMA_FROM_DEVICE);
  2237. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2238. dma_buf_put(data->srcp_dma_buf);
  2239. data->addr = 0;
  2240. data->srcp_dma_buf = NULL;
  2241. data->srcp_attachment = NULL;
  2242. data->mapped = false;
  2243. }
  2244. /*
  2245. * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
  2246. * PM event occurs
  2247. * @mgr: Pointer to rotator manager
2248. * @pmon: Boolean indicating an on/off power event
  2249. */
  2250. void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
  2251. {
  2252. struct sde_hw_rotator *rot;
  2253. u32 l_ts, h_ts, l_hwts, h_hwts;
  2254. u32 rotsts, regdmasts, rotopmode;
  2255. /*
2256. * Compare the last HW timestamp with the SW timestamp before a power-off
2257. * event. If there is a mismatch, it is quite possible the rotator HW is
2258. * either hung or has not finished the last submitted job. In that case,
2259. * it is best to trigger a timeout eventlog to capture useful event log
2260. * data for analysis.
  2261. */
  2262. if (!pmon && mgr && mgr->hw_data) {
  2263. rot = mgr->hw_data;
  2264. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]) &
  2265. SDE_REGDMA_SWTS_MASK;
  2266. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]) &
  2267. SDE_REGDMA_SWTS_MASK;
  2268. /* Need to turn on clock to access rotator register */
  2269. sde_rotator_clk_ctrl(mgr, true);
  2270. l_hwts = __sde_hw_rotator_get_timestamp(rot,
  2271. ROT_QUEUE_LOW_PRIORITY);
  2272. h_hwts = __sde_hw_rotator_get_timestamp(rot,
  2273. ROT_QUEUE_HIGH_PRIORITY);
  2274. regdmasts = SDE_ROTREG_READ(rot->mdss_base,
  2275. REGDMA_CSR_REGDMA_BLOCK_STATUS);
  2276. rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  2277. rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
  2278. SDEROT_DBG(
  2279. "swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
  2280. l_ts, h_ts, l_hwts, h_hwts,
  2281. regdmasts, rotsts);
  2282. SDEROT_EVTLOG(l_ts, h_ts, l_hwts, h_hwts, regdmasts, rotsts);
  2283. if (((l_ts != l_hwts) || (h_ts != h_hwts)) &&
  2284. ((regdmasts & REGDMA_BUSY) ||
  2285. (rotsts & ROT_STATUS_MASK))) {
  2286. SDEROT_ERR(
  2287. "Mismatch SWTS with HWTS: swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
  2288. l_ts, h_ts, l_hwts, h_hwts,
  2289. regdmasts, rotsts);
  2290. _sde_hw_rotator_dump_status(rot, NULL);
  2291. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2292. "vbif_dbg_bus", "panic");
  2293. } else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
  2294. ((regdmasts & REGDMA_BUSY) ||
  2295. (rotsts & ROT_BUSY_BIT))) {
  2296. /*
2297. * the rotator can get stuck in inline mode while mdp is detached
  2298. */
  2299. SDEROT_WARN(
  2300. "Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
  2301. regdmasts, rotsts, rotopmode);
  2302. sde_hw_rotator_reset(rot, NULL);
  2303. } else if ((regdmasts & REGDMA_BUSY) ||
  2304. (rotsts & ROT_BUSY_BIT)) {
  2305. _sde_hw_rotator_dump_status(rot, NULL);
  2306. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2307. "vbif_dbg_bus", "panic");
  2308. sde_hw_rotator_reset(rot, NULL);
  2309. }
  2310. /* Turn off rotator clock after checking rotator registers */
  2311. sde_rotator_clk_ctrl(mgr, false);
  2312. }
  2313. }
  2314. /*
  2315. * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
  2316. * PM event occurs
  2317. * @mgr: Pointer to rotator manager
  2318. * @pmon: Boolean indicate an on/off power event
  2319. */
  2320. void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
  2321. {
  2322. struct sde_hw_rotator *rot;
  2323. u32 l_ts, h_ts;
  2324. /*
  2325. * After a power on event, the rotator HW is reset to default setting.
  2326. * It is necessary to synchronize the SW timestamp with the HW.
  2327. */
  2328. if (pmon && mgr && mgr->hw_data) {
  2329. rot = mgr->hw_data;
  2330. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
  2331. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
2332. SDEROT_DBG("h_ts:0x%x, l_ts:0x%x\n", h_ts, l_ts);
  2333. SDEROT_EVTLOG(h_ts, l_ts);
  2334. rot->reset_hw_ts = true;
  2335. rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] =
  2336. l_ts & SDE_REGDMA_SWTS_MASK;
  2337. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] =
  2338. h_ts & SDE_REGDMA_SWTS_MASK;
  2339. }
  2340. }
  2341. /*
  2342. * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
  2343. * @mgr: Pointer to rotator manager
  2344. */
  2345. static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
  2346. {
  2347. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2348. struct sde_hw_rotator *rot;
  2349. if (!mgr || !mgr->pdev || !mgr->hw_data) {
  2350. SDEROT_ERR("null parameters\n");
  2351. return;
  2352. }
  2353. rot = mgr->hw_data;
  2354. if (rot->irq_num >= 0)
  2355. devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
  2356. if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
  2357. rot->mode == ROT_REGDMA_ON)
  2358. sde_hw_rotator_swts_destroy(rot);
  2359. devm_kfree(&mgr->pdev->dev, mgr->hw_data);
  2360. mgr->hw_data = NULL;
  2361. }
  2362. /*
  2363. * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
  2364. * @mgr: Pointer to rotator manager
  2365. * @pipe_id: pipe identifier (not used)
  2366. * @wb_id: writeback identifier/priority queue identifier
  2367. *
  2368. * This function allocates a new hw rotator resource for the given priority.
  2369. */
  2370. static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
  2371. struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
  2372. {
  2373. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2374. struct sde_hw_rotator_resource_info *resinfo;
  2375. if (!mgr || !mgr->hw_data) {
  2376. SDEROT_ERR("null parameters\n");
  2377. return NULL;
  2378. }
  2379. /*
  2380. * Allocate rotator resource info. Each allocation is per
  2381. * HW priority queue
  2382. */
  2383. resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
  2384. if (!resinfo) {
2385. SDEROT_ERR("Failed to allocate HW rotator resource info\n");
  2386. return NULL;
  2387. }
  2388. resinfo->rot = mgr->hw_data;
  2389. resinfo->hw.wb_id = wb_id;
  2390. atomic_set(&resinfo->hw.num_active, 0);
  2391. init_waitqueue_head(&resinfo->hw.wait_queue);
  2392. /* For non-regdma, only support one active session */
  2393. if (resinfo->rot->mode == ROT_REGDMA_OFF)
  2394. resinfo->hw.max_active = 1;
  2395. else {
  2396. resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
  2397. if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
  2398. (!resinfo->rot->swts_buf.mapped))
  2399. sde_hw_rotator_swts_create(resinfo->rot);
  2400. }
  2401. sde_hw_rotator_enable_irq(resinfo->rot);
  2402. SDEROT_DBG("New rotator resource:%pK, priority:%d\n",
  2403. resinfo, wb_id);
  2404. return &resinfo->hw;
  2405. }
  2406. /*
  2407. * sde_hw_rotator_free_ext - free the given rotator resource
  2408. * @mgr: Pointer to rotator manager
  2409. * @hw: Pointer to rotator resource
  2410. */
  2411. static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
  2412. struct sde_rot_hw_resource *hw)
  2413. {
  2414. struct sde_hw_rotator_resource_info *resinfo;
  2415. if (!mgr || !mgr->hw_data)
  2416. return;
  2417. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2418. SDEROT_DBG(
  2419. "Free rotator resource:%pK, priority:%d, active:%d, pending:%d\n",
  2420. resinfo, hw->wb_id, atomic_read(&hw->num_active),
  2421. hw->pending_count);
  2422. sde_hw_rotator_disable_irq(resinfo->rot);
  2423. devm_kfree(&mgr->pdev->dev, resinfo);
  2424. }
  2425. /*
  2426. * sde_hw_rotator_alloc_rotctx - allocate rotator context
  2427. * @rot: Pointer to rotator hw
  2428. * @hw: Pointer to rotator resource
  2429. * @session_id: Session identifier of this context
  2430. * @sequence_id: Sequence identifier of this request
  2431. * @sbuf_mode: true if stream buffer is requested
  2432. *
  2433. * This function allocates a new rotator context for the given session id.
  2434. */
  2435. static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
  2436. struct sde_hw_rotator *rot,
  2437. struct sde_rot_hw_resource *hw,
  2438. u32 session_id,
  2439. u32 sequence_id,
  2440. bool sbuf_mode)
  2441. {
  2442. struct sde_hw_rotator_context *ctx;
  2443. /* Allocate rotator context */
  2444. ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
  2445. if (!ctx) {
2446. SDEROT_ERR("Failed to allocate HW rotator context\n");
  2447. return NULL;
  2448. }
  2449. ctx->rot = rot;
  2450. ctx->q_id = hw->wb_id;
  2451. ctx->session_id = session_id;
  2452. ctx->sequence_id = sequence_id;
  2453. ctx->hwres = hw;
  2454. ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
  2455. ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
  2456. ctx->is_secure = false;
  2457. ctx->sbuf_mode = sbuf_mode;
  2458. INIT_LIST_HEAD(&ctx->list);
  2459. ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
  2460. [sde_hw_rotator_get_regdma_ctxidx(ctx)];
  2461. ctx->regdma_wrptr = ctx->regdma_base;
  2462. ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
  2463. ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
  2464. sde_hw_rotator_get_regdma_ctxidx(ctx));
  2465. ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
  2466. init_completion(&ctx->rot_comp);
  2467. init_waitqueue_head(&ctx->regdma_waitq);
  2468. /* Store rotator context for lookup purpose */
  2469. sde_hw_rotator_put_ctx(ctx);
  2470. SDEROT_DBG(
  2471. "New rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
  2472. ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
  2473. ctx->q_id, ctx->timestamp,
  2474. atomic_read(&ctx->hwres->num_active),
  2475. ctx->sbuf_mode);
  2476. return ctx;
  2477. }
  2478. /*
  2479. * sde_hw_rotator_free_rotctx - free the given rotator context
  2480. * @rot: Pointer to rotator hw
  2481. * @ctx: Pointer to rotator context
  2482. */
  2483. static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
  2484. struct sde_hw_rotator_context *ctx)
  2485. {
  2486. if (!rot || !ctx)
  2487. return;
  2488. SDEROT_DBG(
  2489. "Free rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
  2490. ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
  2491. ctx->q_id, ctx->timestamp,
  2492. atomic_read(&ctx->hwres->num_active),
  2493. ctx->sbuf_mode);
2494. /* Remove rotator context from the lookup table */
  2495. sde_hw_rotator_clr_ctx(ctx);
  2496. devm_kfree(&rot->pdev->dev, ctx);
  2497. }
  2498. /*
  2499. * sde_hw_rotator_config - configure hw for the given rotation entry
  2500. * @hw: Pointer to rotator resource
  2501. * @entry: Pointer to rotation entry
  2502. *
2503. * This function sets up the fetch/writeback/rotator blocks, as well as VBIF,
  2504. * based on the given rotation entry.
  2505. */
  2506. static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
  2507. struct sde_rot_entry *entry)
  2508. {
  2509. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2510. struct sde_hw_rotator *rot;
  2511. struct sde_hw_rotator_resource_info *resinfo;
  2512. struct sde_hw_rotator_context *ctx;
  2513. struct sde_hw_rot_sspp_cfg sspp_cfg;
  2514. struct sde_hw_rot_wb_cfg wb_cfg;
  2515. u32 danger_lut = 0; /* applicable for realtime client only */
  2516. u32 safe_lut = 0; /* applicable for realtime client only */
  2517. u32 flags = 0;
  2518. u32 rststs = 0;
  2519. struct sde_rotation_item *item;
  2520. int ret;
  2521. if (!hw || !entry) {
  2522. SDEROT_ERR("null hw resource/entry\n");
  2523. return -EINVAL;
  2524. }
  2525. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2526. rot = resinfo->rot;
  2527. item = &entry->item;
  2528. ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
  2529. item->sequence_id, item->output.sbuf);
  2530. if (!ctx) {
  2531. SDEROT_ERR("Failed allocating rotator context!!\n");
  2532. return -EINVAL;
  2533. }
  2534. /* save entry for debugging purposes */
  2535. ctx->last_entry = entry;
  2536. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
  2537. if (entry->dst_buf.sbuf) {
  2538. u32 op_mode;
  2539. if (entry->item.trigger ==
  2540. SDE_ROTATOR_TRIGGER_COMMAND)
  2541. ctx->start_ctrl = (rot->cmd_trigger << 4);
  2542. else if (entry->item.trigger ==
  2543. SDE_ROTATOR_TRIGGER_VIDEO)
  2544. ctx->start_ctrl = (rot->vid_trigger << 4);
  2545. else
  2546. ctx->start_ctrl = 0;
  2547. ctx->sys_cache_mode = BIT(15) |
  2548. ((item->output.scid & 0x1f) << 8) |
  2549. (item->output.writeback ? 0x5 : 0);
  2550. ctx->op_mode = BIT(4) |
  2551. ((ctx->rot->sbuf_headroom & 0xff) << 8);
  2552. /* detect transition to inline mode */
  2553. op_mode = (SDE_ROTREG_READ(rot->mdss_base,
  2554. ROTTOP_OP_MODE) >> 4) & 0x3;
  2555. if (!op_mode) {
  2556. u32 status;
  2557. status = SDE_ROTREG_READ(rot->mdss_base,
  2558. ROTTOP_STATUS);
  2559. if (status & BIT(0)) {
  2560. SDEROT_ERR("rotator busy 0x%x\n",
  2561. status);
  2562. _sde_hw_rotator_dump_status(rot, NULL);
  2563. SDEROT_EVTLOG_TOUT_HANDLER("rot",
  2564. "vbif_dbg_bus",
  2565. "panic");
  2566. }
  2567. }
  2568. } else {
  2569. ctx->start_ctrl = BIT(0);
  2570. ctx->sys_cache_mode = 0;
  2571. ctx->op_mode = 0;
  2572. }
  2573. } else {
  2574. ctx->start_ctrl = BIT(0);
  2575. }
  2576. SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
  2577. /*
2578. * If the rotator HW was reset but the PM event notification was missed,
2579. * re-seed the SW/HW timestamps here automatically.
  2580. */
  2581. rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
  2582. if (!rot->reset_hw_ts && rststs) {
  2583. u32 l_ts, h_ts, l_hwts, h_hwts;
  2584. h_hwts = __sde_hw_rotator_get_timestamp(rot,
  2585. ROT_QUEUE_HIGH_PRIORITY);
  2586. l_hwts = __sde_hw_rotator_get_timestamp(rot,
  2587. ROT_QUEUE_LOW_PRIORITY);
  2588. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
  2589. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
  2590. SDEROT_EVTLOG(0xbad0, rststs, l_hwts, h_hwts, l_ts, h_ts);
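/*
 * Re-seed the timestamps from the SW counters. The counter for the
 * current queue has presumably already been advanced when this context
 * was allocated, so it is stepped back by one before being written to
 * the HW; the other queue's counter is used as-is.
 */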
  2591. if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY) {
  2592. h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
  2593. l_ts &= SDE_REGDMA_SWTS_MASK;
  2594. } else {
  2595. l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
  2596. h_ts &= SDE_REGDMA_SWTS_MASK;
  2597. }
2598. SDEROT_DBG("h_ts:0x%x, l_ts:0x%x\n", h_ts, l_ts);
  2599. SDEROT_EVTLOG(0x900d, h_ts, l_ts);
  2600. rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] = l_ts;
  2601. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] = h_ts;
  2602. rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY, h_ts);
  2603. rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY, l_ts);
  2604. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
  2605. /* ensure write is issued to the rotator HW */
  2606. wmb();
  2607. }
  2608. if (rot->reset_hw_ts) {
  2609. SDEROT_EVTLOG(rot->last_hwts[ROT_QUEUE_LOW_PRIORITY],
  2610. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
  2611. rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY,
  2612. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
  2613. rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY,
  2614. rot->last_hwts[ROT_QUEUE_LOW_PRIORITY]);
  2615. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
  2616. /* ensure write is issued to the rotator HW */
  2617. wmb();
  2618. rot->reset_hw_ts = false;
  2619. }
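/* Translate public SDE_ROTATION_* item flags into internal SDE_ROT_FLAG_* bits */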
  2620. flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
  2621. SDE_ROT_FLAG_FLIP_LR : 0;
  2622. flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
  2623. SDE_ROT_FLAG_FLIP_UD : 0;
  2624. flags |= (item->flags & SDE_ROTATION_90) ?
  2625. SDE_ROT_FLAG_ROT_90 : 0;
  2626. flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
  2627. SDE_ROT_FLAG_DEINTERLACE : 0;
  2628. flags |= (item->flags & SDE_ROTATION_SECURE) ?
  2629. SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
  2630. flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
  2631. SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;
  2632. sspp_cfg.img_width = item->input.width;
  2633. sspp_cfg.img_height = item->input.height;
  2634. sspp_cfg.fps = entry->perf->config.frame_rate;
  2635. sspp_cfg.bw = entry->perf->bw;
  2636. sspp_cfg.fmt = sde_get_format_params(item->input.format);
  2637. if (!sspp_cfg.fmt) {
  2638. SDEROT_ERR("null format\n");
  2639. ret = -EINVAL;
  2640. goto error;
  2641. }
  2642. sspp_cfg.src_rect = &item->src_rect;
  2643. sspp_cfg.data = &entry->src_buf;
  2644. sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
  2645. item->input.height, &sspp_cfg.src_plane,
  2646. 0, /* No bwc_mode */
  2647. (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
  2648. true : false);
  2649. rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
  2650. &sspp_cfg, danger_lut, safe_lut,
  2651. entry->dnsc_factor_w, entry->dnsc_factor_h, flags);
  2652. wb_cfg.img_width = item->output.width;
  2653. wb_cfg.img_height = item->output.height;
  2654. wb_cfg.fps = entry->perf->config.frame_rate;
  2655. wb_cfg.bw = entry->perf->bw;
  2656. wb_cfg.fmt = sde_get_format_params(item->output.format);
  2657. if (!wb_cfg.fmt) {
  2658. SDEROT_ERR("null format\n");
  2659. ret = -EINVAL;
  2660. goto error;
  2661. }
  2662. wb_cfg.dst_rect = &item->dst_rect;
  2663. wb_cfg.data = &entry->dst_buf;
  2664. sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
  2665. item->output.height, &wb_cfg.dst_plane,
  2666. 0, /* No bwc_mode */
  2667. (flags & SDE_ROT_FLAG_ROT_90) ? true : false);
  2668. wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
  2669. wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
  2670. wb_cfg.prefill_bw = item->prefill_bw;
  2671. rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);
  2672. /* setup VA mapping for debugfs */
  2673. if (rot->dbgmem) {
  2674. sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
  2675. &item->input,
  2676. &entry->src_buf);
  2677. sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
  2678. &item->output,
  2679. &entry->dst_buf);
  2680. }
  2681. SDEROT_EVTLOG(ctx->timestamp, flags,
  2682. item->input.width, item->input.height,
  2683. item->output.width, item->output.height,
  2684. entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
  2685. item->input.format, item->output.format,
  2686. entry->perf->config.frame_rate);
  2687. /* initialize static vbif setting */
  2688. sde_mdp_init_vbif();
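/*
 * For offline (non-sbuf) jobs, program the VBIF outstanding-transaction
 * limits separately for the read (SSPP) and write (writeback) xin
 * clients below. Traffic-shaped sessions are treated as ABGR_8888 for
 * the OT calculation.
 */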
  2689. if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
  2690. struct sde_mdp_set_ot_params ot_params;
  2691. memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
  2692. ot_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
  2693. ot_params.num = 0; /* not used */
  2694. ot_params.width = entry->perf->config.input.width;
  2695. ot_params.height = entry->perf->config.input.height;
  2696. ot_params.fps = entry->perf->config.frame_rate;
  2697. ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
  2698. ot_params.reg_off_mdp_clk_ctrl =
  2699. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  2700. ot_params.bit_off_mdp_clk_ctrl =
  2701. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
  2702. ot_params.fmt = ctx->is_traffic_shaping ?
  2703. SDE_PIX_FMT_ABGR_8888 :
  2704. entry->perf->config.input.format;
  2705. ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
  2706. ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
  2707. sde_mdp_set_ot_limit(&ot_params);
  2708. }
  2709. if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
  2710. struct sde_mdp_set_ot_params ot_params;
  2711. memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
  2712. ot_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
  2713. ot_params.num = 0; /* not used */
  2714. ot_params.width = entry->perf->config.input.width;
  2715. ot_params.height = entry->perf->config.input.height;
  2716. ot_params.fps = entry->perf->config.frame_rate;
  2717. ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
  2718. ot_params.reg_off_mdp_clk_ctrl =
  2719. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  2720. ot_params.bit_off_mdp_clk_ctrl =
  2721. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
  2722. ot_params.fmt = ctx->is_traffic_shaping ?
  2723. SDE_PIX_FMT_ABGR_8888 :
  2724. entry->perf->config.input.format;
  2725. ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
  2726. ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
  2727. sde_mdp_set_ot_limit(&ot_params);
  2728. }
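/*
 * When per-pipe QoS LUTs are supported, program an all-zero (lowest
 * priority) CREQ LUT for the non-realtime rotator read client.
 */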
  2729. if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
  2730. u32 qos_lut = 0; /* low priority for nrt read client */
  2731. trace_rot_perf_set_qos_luts(mdata->vbif_xin_id[XIN_SSPP],
  2732. sspp_cfg.fmt->format, qos_lut,
  2733. sde_mdp_is_linear_format(sspp_cfg.fmt));
  2734. SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
  2735. }
  2736. /* VBIF QoS and other settings */
  2737. if (!ctx->sbuf_mode) {
  2738. if (mdata->parent_pdev)
  2739. sde_hw_rotator_vbif_rt_setting();
  2740. else
  2741. sde_hw_rotator_vbif_setting(rot);
  2742. }
  2743. return 0;
  2744. error:
  2745. sde_hw_rotator_free_rotctx(rot, ctx);
  2746. return ret;
  2747. }
  2748. /*
  2749. * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
  2750. * @hw: Pointer to rotator resource
  2751. * @entry: Pointer to rotation entry
  2752. *
  2753. * This function cancels a previously configured rotation entry.
  2754. */
  2755. static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
  2756. struct sde_rot_entry *entry)
  2757. {
  2758. struct sde_hw_rotator *rot;
  2759. struct sde_hw_rotator_resource_info *resinfo;
  2760. struct sde_hw_rotator_context *ctx;
  2761. unsigned long flags;
  2762. if (!hw || !entry) {
  2763. SDEROT_ERR("null hw resource/entry\n");
  2764. return -EINVAL;
  2765. }
  2766. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2767. rot = resinfo->rot;
  2768. /* Lookup rotator context from session-id */
  2769. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2770. entry->item.sequence_id, hw->wb_id);
  2771. if (!ctx) {
2772. SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
  2773. entry->item.session_id);
  2774. return -EINVAL;
  2775. }
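/*
 * Publish this context's timestamp as done so the cancelled job is
 * treated as complete by the timestamp bookkeeping.
 */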
  2776. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2777. rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
  2778. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2779. SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
  2780. if (rot->dbgmem) {
  2781. sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
  2782. sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
  2783. }
  2784. /* Current rotator context job is finished, time to free up */
  2785. sde_hw_rotator_free_rotctx(rot, ctx);
  2786. return 0;
  2787. }
  2788. /*
  2789. * sde_hw_rotator_kickoff - kickoff processing on the given entry
  2790. * @hw: Pointer to rotator resource
  2791. * @entry: Pointer to rotation entry
  2792. */
  2793. static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
  2794. struct sde_rot_entry *entry)
  2795. {
  2796. struct sde_hw_rotator *rot;
  2797. struct sde_hw_rotator_resource_info *resinfo;
  2798. struct sde_hw_rotator_context *ctx;
  2799. if (!hw || !entry) {
  2800. SDEROT_ERR("null hw resource/entry\n");
  2801. return -EINVAL;
  2802. }
  2803. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2804. rot = resinfo->rot;
  2805. /* Lookup rotator context from session-id */
  2806. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2807. entry->item.sequence_id, hw->wb_id);
  2808. if (!ctx) {
2809. SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
  2810. entry->item.session_id);
  2811. return -EINVAL;
  2812. }
  2813. rot->ops.start_rotator(ctx, ctx->q_id);
  2814. return 0;
  2815. }
  2816. static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
  2817. struct sde_rot_entry *entry)
  2818. {
  2819. struct sde_hw_rotator *rot;
  2820. struct sde_hw_rotator_resource_info *resinfo;
  2821. struct sde_hw_rotator_context *ctx;
  2822. unsigned long flags;
  2823. if (!hw || !entry) {
  2824. SDEROT_ERR("null hw resource/entry\n");
  2825. return -EINVAL;
  2826. }
  2827. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2828. rot = resinfo->rot;
  2829. /* Lookup rotator context from session-id */
  2830. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2831. entry->item.sequence_id, hw->wb_id);
  2832. if (!ctx) {
2833. SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
  2834. entry->item.session_id);
  2835. return -EINVAL;
  2836. }
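/*
 * Publish the timestamp, flag the context as aborted, and wake anyone
 * blocked on the regdma wait queue so the abort is observed promptly.
 */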
  2837. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2838. rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
  2839. ctx->abort = true;
  2840. wake_up_all(&ctx->regdma_waitq);
  2841. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2842. SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
  2843. return 0;
  2844. }
  2845. /*
  2846. * sde_hw_rotator_wait4done - wait for completion notification
  2847. * @hw: Pointer to rotator resource
  2848. * @entry: Pointer to rotation entry
  2849. *
2850. * This function blocks until the given entry completes, an error is
2851. * detected, or a timeout occurs.
  2852. */
  2853. static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
  2854. struct sde_rot_entry *entry)
  2855. {
  2856. struct sde_hw_rotator *rot;
  2857. struct sde_hw_rotator_resource_info *resinfo;
  2858. struct sde_hw_rotator_context *ctx;
  2859. int ret;
  2860. if (!hw || !entry) {
  2861. SDEROT_ERR("null hw resource/entry\n");
  2862. return -EINVAL;
  2863. }
  2864. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2865. rot = resinfo->rot;
  2866. /* Lookup rotator context from session-id */
  2867. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2868. entry->item.sequence_id, hw->wb_id);
  2869. if (!ctx) {
2870. SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
  2871. entry->item.session_id);
  2872. return -EINVAL;
  2873. }
  2874. ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
  2875. if (rot->dbgmem) {
  2876. sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
  2877. sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
  2878. }
2879. /* Current rotator context job is finished, time to free up */
  2880. sde_hw_rotator_free_rotctx(rot, ctx);
  2881. return ret;
  2882. }
  2883. /*
  2884. * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
  2885. * @rot: Pointer to hw rotator
  2886. *
  2887. * This function initializes feature and/or capability bitmask based on
  2888. * h/w version read from the device.
  2889. */
  2890. static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
  2891. {
  2892. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2893. u32 hw_version;
  2894. if (!mdata) {
  2895. SDEROT_ERR("null rotator data\n");
  2896. return -EINVAL;
  2897. }
  2898. hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
  2899. SDEROT_DBG("hw version %8.8x\n", hw_version);
  2900. clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
  2901. set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
  2902. set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
  2903. set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
  2904. clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
  2905. set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
  2906. /* features exposed via rotator top h/w version */
  2907. if (hw_version != SDE_ROT_TYPE_V1_0) {
  2908. SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
  2909. set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
  2910. }
  2911. set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
  2912. mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
  2913. mdata->nrt_vbif_dbg_bus_size =
  2914. ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
  2915. mdata->rot_dbg_bus = rot_dbgbus_r3;
  2916. mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
  2917. mdata->regdump = sde_rot_r3_regdump;
  2918. mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
  2919. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
  2920. /* features exposed via mdss h/w version */
  2921. if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_600)) {
  2922. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2923. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2924. set_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map);
  2925. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2926. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2927. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2928. sde_hw_rotator_v4_inpixfmts;
  2929. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2930. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2931. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2932. sde_hw_rotator_v4_outpixfmts;
  2933. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2934. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2935. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2936. sde_hw_rotator_v4_inpixfmts_sbuf;
  2937. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2938. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2939. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2940. sde_hw_rotator_v4_outpixfmts_sbuf;
  2941. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2942. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2943. rot->downscale_caps =
  2944. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2945. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2946. SDE_MDP_HW_REV_500)) {
  2947. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2948. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2949. set_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map);
  2950. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2951. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2952. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2953. sde_hw_rotator_v4_inpixfmts;
  2954. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2955. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2956. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2957. sde_hw_rotator_v4_outpixfmts;
  2958. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2959. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2960. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2961. sde_hw_rotator_v4_inpixfmts_sbuf;
  2962. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2963. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2964. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2965. sde_hw_rotator_v4_outpixfmts_sbuf;
  2966. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2967. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2968. rot->downscale_caps =
  2969. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2970. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2971. SDE_MDP_HW_REV_530) ||
  2972. IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2973. SDE_MDP_HW_REV_520)) {
  2974. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2975. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2976. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  2977. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2978. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2979. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2980. sde_hw_rotator_v4_inpixfmts;
  2981. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2982. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2983. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2984. sde_hw_rotator_v4_outpixfmts;
  2985. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2986. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2987. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2988. sde_hw_rotator_v4_inpixfmts_sbuf;
  2989. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2990. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2991. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2992. sde_hw_rotator_v4_outpixfmts_sbuf;
  2993. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2994. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2995. rot->downscale_caps =
  2996. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2997. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2998. SDE_MDP_HW_REV_540)) {
  2999. SDEROT_DBG("Sys cache inline rotation not supported\n");
  3000. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  3001. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  3002. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  3003. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3004. sde_hw_rotator_v4_inpixfmts;
  3005. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3006. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  3007. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3008. sde_hw_rotator_v4_outpixfmts;
  3009. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3010. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  3011. rot->downscale_caps =
  3012. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  3013. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  3014. SDE_MDP_HW_REV_400) ||
  3015. IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  3016. SDE_MDP_HW_REV_410)) {
  3017. SDEROT_DBG("Supporting sys cache inline rotation\n");
  3018. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  3019. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  3020. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  3021. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3022. sde_hw_rotator_v4_inpixfmts;
  3023. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3024. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  3025. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3026. sde_hw_rotator_v4_outpixfmts;
  3027. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3028. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  3029. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  3030. sde_hw_rotator_v4_inpixfmts_sbuf;
  3031. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  3032. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  3033. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  3034. sde_hw_rotator_v4_outpixfmts_sbuf;
  3035. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  3036. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  3037. rot->downscale_caps =
  3038. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  3039. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  3040. SDE_MDP_HW_REV_630)) {
  3041. SDEROT_DBG("Sys cache inline rotation not supported\n");
  3042. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  3043. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  3044. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3045. sde_hw_rotator_v4_inpixfmts;
  3046. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3047. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  3048. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3049. sde_hw_rotator_v4_outpixfmts;
  3050. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3051. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  3052. rot->downscale_caps =
  3053. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  3054. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  3055. SDE_MDP_HW_REV_660)) {
  3056. SDEROT_DBG("Sys cache inline rotation not supported\n");
  3057. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  3058. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  3059. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3060. sde_hw_rotator_v4_inpixfmts;
  3061. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3062. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  3063. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3064. sde_hw_rotator_v4_outpixfmts;
  3065. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3066. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  3067. rot->downscale_caps =
  3068. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  3069. } else {
  3070. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3071. sde_hw_rotator_v3_inpixfmts;
  3072. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3073. ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
  3074. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  3075. sde_hw_rotator_v3_outpixfmts;
  3076. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  3077. ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
  3078. rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
  3079. "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
  3080. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  3081. }
  3082. return 0;
  3083. }
  3084. /*
  3085. * sde_hw_rotator_validate_entry - validate rotation entry
  3086. * @mgr: Pointer to rotator manager
  3087. * @entry: Pointer to rotation entry
  3088. *
3089. * This function validates the given rotation entry and provides a possible
3090. * fixup (future improvement) if available. It returns 0 if the entry is
3091. * valid, and an error code otherwise.
  3092. */
  3093. static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
  3094. struct sde_rot_entry *entry)
  3095. {
  3096. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  3097. struct sde_hw_rotator *hw_data;
  3098. int ret = 0;
  3099. u16 src_w, src_h, dst_w, dst_h;
  3100. struct sde_rotation_item *item = &entry->item;
  3101. struct sde_mdp_format_params *fmt;
  3102. if (!mgr || !entry || !mgr->hw_data) {
  3103. SDEROT_ERR("invalid parameters\n");
  3104. return -EINVAL;
  3105. }
  3106. hw_data = mgr->hw_data;
  3107. if (hw_data->maxlinewidth < item->src_rect.w) {
  3108. SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
  3109. return -EINVAL;
  3110. }
  3111. src_w = item->src_rect.w;
  3112. src_h = item->src_rect.h;
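/*
 * For 90-degree rotation the destination rectangle is specified
 * post-rotation, so swap its width/height before comparing against the
 * source for the downscale checks below.
 */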
  3113. if (item->flags & SDE_ROTATION_90) {
  3114. dst_w = item->dst_rect.h;
  3115. dst_h = item->dst_rect.w;
  3116. } else {
  3117. dst_w = item->dst_rect.w;
  3118. dst_h = item->dst_rect.h;
  3119. }
  3120. entry->dnsc_factor_w = 0;
  3121. entry->dnsc_factor_h = 0;
  3122. if (item->output.sbuf &&
  3123. !test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
  3124. SDEROT_ERR("stream buffer not supported\n");
  3125. return -EINVAL;
  3126. }
  3127. if ((src_w != dst_w) || (src_h != dst_h)) {
  3128. if (!dst_w || !dst_h) {
3129. SDEROT_DBG("zero output width/height not supported\n");
  3130. ret = -EINVAL;
  3131. goto dnsc_err;
  3132. }
  3133. if ((src_w % dst_w) || (src_h % dst_h)) {
3134. SDEROT_DBG("non-integral scale not supported\n");
  3135. ret = -EINVAL;
  3136. goto dnsc_1p5_check;
  3137. }
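/*
 * Integral downscale ratios must be powers of two, no greater than 64;
 * (x & (x - 1)) == 0 is the usual power-of-two test used below.
 */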
  3138. entry->dnsc_factor_w = src_w / dst_w;
  3139. if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
  3140. (entry->dnsc_factor_w > 64)) {
3141. SDEROT_DBG("non power-of-2 w_scale not supported\n");
  3142. ret = -EINVAL;
  3143. goto dnsc_err;
  3144. }
  3145. entry->dnsc_factor_h = src_h / dst_h;
  3146. if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
  3147. (entry->dnsc_factor_h > 64)) {
3148. SDEROT_DBG("non power-of-2 h_scale not supported\n");
  3149. ret = -EINVAL;
  3150. goto dnsc_err;
  3151. }
  3152. }
  3153. fmt = sde_get_format_params(item->output.format);
  3154. /*
3155. * Rotator downscale supports at most 4x for UBWC formats and
3156. * at most 2x for TP10/TP10_UBWC formats
  3157. */
  3158. if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
  3159. SDEROT_DBG("max downscale for UBWC format is 4\n");
  3160. ret = -EINVAL;
  3161. goto dnsc_err;
  3162. }
  3163. if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
  3164. SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
  3165. ret = -EINVAL;
  3166. }
  3167. goto dnsc_err;
  3168. dnsc_1p5_check:
  3169. /* Check for 1.5 downscale that only applies to V2 HW */
  3170. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
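/*
 * A 1.5x ratio means 2 * src == 3 * dst, so the integer factor
 * src / dst is exactly 1; both conditions are checked per dimension.
 */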
  3171. entry->dnsc_factor_w = src_w / dst_w;
  3172. if ((entry->dnsc_factor_w != 1) ||
  3173. ((dst_w * 3) != (src_w * 2))) {
  3174. SDEROT_DBG(
  3175. "No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
  3176. src_w, dst_w);
  3177. ret = -EINVAL;
  3178. goto dnsc_err;
  3179. }
  3180. entry->dnsc_factor_h = src_h / dst_h;
  3181. if ((entry->dnsc_factor_h != 1) ||
  3182. ((dst_h * 3) != (src_h * 2))) {
  3183. SDEROT_DBG(
  3184. "Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
  3185. src_h, dst_h);
  3186. ret = -EINVAL;
  3187. goto dnsc_err;
  3188. }
  3189. ret = 0;
  3190. }
  3191. dnsc_err:
  3192. /* Downscaler does not support asymmetrical dnsc */
  3193. if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
3194. SDEROT_DBG("asymmetric downscale not supported\n");
  3195. ret = -EINVAL;
  3196. }
  3197. if (ret) {
  3198. entry->dnsc_factor_w = 0;
  3199. entry->dnsc_factor_h = 0;
  3200. }
  3201. return ret;
  3202. }
  3203. /*
  3204. * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
  3205. * @mgr: Pointer to rotator manager
  3206. * @attr: Pointer to device attribute interface
  3207. * @buf: Pointer to output buffer
  3208. * @len: Length of output buffer
  3209. */
  3210. static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
  3211. struct device_attribute *attr, char *buf, ssize_t len)
  3212. {
  3213. struct sde_hw_rotator *hw_data;
  3214. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  3215. int cnt = 0;
  3216. if (!mgr || !buf)
  3217. return 0;
  3218. hw_data = mgr->hw_data;
  3219. #define SPRINT(fmt, ...) \
  3220. (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
  3221. /* insert capabilities here */
  3222. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
  3223. SPRINT("min_downscale=1.5\n");
  3224. else
  3225. SPRINT("min_downscale=2.0\n");
  3226. SPRINT("downscale_compression=1\n");
  3227. if (hw_data->downscale_caps)
  3228. SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
  3229. SPRINT("max_line_width=%d\n", sde_rotator_get_maxlinewidth(mgr));
  3230. #undef SPRINT
  3231. return cnt;
  3232. }
  3233. /*
  3234. * sde_hw_rotator_show_state - output state info to sysfs 'state' file
  3235. * @mgr: Pointer to rotator manager
  3236. * @attr: Pointer to device attribute interface
  3237. * @buf: Pointer to output buffer
  3238. * @len: Length of output buffer
  3239. */
  3240. static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
  3241. struct device_attribute *attr, char *buf, ssize_t len)
  3242. {
  3243. struct sde_hw_rotator *rot;
  3244. struct sde_hw_rotator_context *ctx;
  3245. int cnt = 0;
  3246. int num_active = 0;
  3247. int i, j;
  3248. if (!mgr || !buf) {
  3249. SDEROT_ERR("null parameters\n");
  3250. return 0;
  3251. }
  3252. rot = mgr->hw_data;
  3253. #define SPRINT(fmt, ...) \
  3254. (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
  3255. if (rot) {
  3256. SPRINT("rot_mode=%d\n", rot->mode);
  3257. SPRINT("irq_num=%d\n", rot->irq_num);
  3258. if (rot->mode == ROT_REGDMA_OFF) {
  3259. SPRINT("max_active=1\n");
  3260. SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
  3261. } else {
  3262. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  3263. for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
  3264. j++) {
  3265. ctx = rot->rotCtx[i][j];
  3266. if (ctx) {
  3267. SPRINT(
  3268. "rotCtx[%d][%d]:%pK\n",
  3269. i, j, ctx);
  3270. ++num_active;
  3271. }
  3272. }
  3273. }
  3274. SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
  3275. SPRINT("num_active=%d\n", num_active);
  3276. }
  3277. }
  3278. #undef SPRINT
  3279. return cnt;
  3280. }
  3281. /*
  3282. * sde_hw_rotator_get_pixfmt - get the indexed pixel format
  3283. * @mgr: Pointer to rotator manager
  3284. * @index: index of pixel format
  3285. * @input: true for input port; false for output port
  3286. * @mode: operating mode
  3287. */
  3288. static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
  3289. int index, bool input, u32 mode)
  3290. {
  3291. struct sde_hw_rotator *rot;
  3292. if (!mgr || !mgr->hw_data) {
  3293. SDEROT_ERR("null parameters\n");
  3294. return 0;
  3295. }
  3296. rot = mgr->hw_data;
  3297. if (mode >= SDE_ROTATOR_MODE_MAX) {
  3298. SDEROT_ERR("invalid rotator mode %d\n", mode);
  3299. return 0;
  3300. }
  3301. if (input) {
  3302. if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
  3303. return rot->inpixfmts[mode][index];
  3304. else
  3305. return 0;
  3306. } else {
  3307. if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
  3308. return rot->outpixfmts[mode][index];
  3309. else
  3310. return 0;
  3311. }
  3312. }
  3313. /*
  3314. * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
  3315. * @mgr: Pointer to rotator manager
  3316. * @pixfmt: pixel format to be verified
  3317. * @input: true for input port; false for output port
  3318. * @mode: operating mode
  3319. */
  3320. static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
  3321. bool input, u32 mode)
  3322. {
  3323. struct sde_hw_rotator *rot;
  3324. const u32 *pixfmts;
  3325. u32 num_pixfmt;
  3326. int i;
  3327. if (!mgr || !mgr->hw_data) {
  3328. SDEROT_ERR("null parameters\n");
  3329. return false;
  3330. }
  3331. rot = mgr->hw_data;
  3332. if (mode >= SDE_ROTATOR_MODE_MAX) {
  3333. SDEROT_ERR("invalid rotator mode %d\n", mode);
  3334. return false;
  3335. }
  3336. if (input) {
  3337. pixfmts = rot->inpixfmts[mode];
  3338. num_pixfmt = rot->num_inpixfmt[mode];
  3339. } else {
  3340. pixfmts = rot->outpixfmts[mode];
  3341. num_pixfmt = rot->num_outpixfmt[mode];
  3342. }
  3343. if (!pixfmts || !num_pixfmt) {
  3344. SDEROT_ERR("invalid pixel format tables\n");
  3345. return false;
  3346. }
  3347. for (i = 0; i < num_pixfmt; i++)
  3348. if (pixfmts[i] == pixfmt)
  3349. return true;
  3350. return false;
  3351. }
  3352. /*
  3353. * sde_hw_rotator_get_downscale_caps - get scaling capability string
  3354. * @mgr: Pointer to rotator manager
  3355. * @caps: Pointer to capability string buffer; NULL to return maximum length
  3356. * @len: length of capability string buffer
  3357. * return: length of capability string
  3358. */
  3359. static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
  3360. char *caps, int len)
  3361. {
  3362. struct sde_hw_rotator *rot;
  3363. int rc = 0;
  3364. if (!mgr || !mgr->hw_data) {
  3365. SDEROT_ERR("null parameters\n");
  3366. return -EINVAL;
  3367. }
  3368. rot = mgr->hw_data;
  3369. if (rot->downscale_caps) {
  3370. if (caps)
  3371. rc = snprintf(caps, len, "%s", rot->downscale_caps);
  3372. else
  3373. rc = strlen(rot->downscale_caps);
  3374. }
  3375. return rc;
  3376. }
  3377. /*
  3378. * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
  3379. * @mgr: Pointer to rotator manager
  3380. * return: maximum line width supported by hardware
  3381. */
  3382. static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
  3383. {
  3384. struct sde_hw_rotator *rot;
  3385. if (!mgr || !mgr->hw_data) {
  3386. SDEROT_ERR("null parameters\n");
  3387. return -EINVAL;
  3388. }
  3389. rot = mgr->hw_data;
  3390. return rot->maxlinewidth;
  3391. }
  3392. /*
  3393. * sde_hw_rotator_dump_status - dump status to debug output
  3394. * @mgr: Pointer to rotator manager
  3395. * return: none
  3396. */
  3397. static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
  3398. {
  3399. if (!mgr || !mgr->hw_data) {
  3400. SDEROT_ERR("null parameters\n");
  3401. return;
  3402. }
  3403. _sde_hw_rotator_dump_status(mgr->hw_data, NULL);
  3404. }
  3405. /*
  3406. * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  3407. * @hw_data: Pointer to rotator hw
  3408. * @dev: Pointer to platform device
  3409. */
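/*
 * A hypothetical device tree fragment consumed by this parser, with
 * illustrative values only:
 *
 *   qcom,mdss-rot-mode = <1>;
 *   qcom,mdss-highest-bank-bit = <2>;
 *   qcom,sde-ubwc-malsize = <1>;
 *   qcom,sde-ubwc_swizzle = <1>;
 *   qcom,mdss-sbuf-headroom = <20>;
 *   qcom,mdss-rot-linewidth = <2560>;
 *
 * Any property that is absent falls back to the default noted below.
 */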
  3410. static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
  3411. struct platform_device *dev)
  3412. {
  3413. int ret = 0;
  3414. u32 data;
  3415. if (!hw_data || !dev)
  3416. return -EINVAL;
  3417. ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
  3418. &data);
  3419. if (ret) {
  3420. SDEROT_DBG("default to regdma off\n");
  3421. ret = 0;
  3422. hw_data->mode = ROT_REGDMA_OFF;
  3423. } else if (data < ROT_REGDMA_MAX) {
  3424. SDEROT_DBG("set to regdma mode %d\n", data);
  3425. hw_data->mode = data;
  3426. } else {
3427. SDEROT_ERR("regdma mode out of range, defaulting to regdma off\n");
  3428. hw_data->mode = ROT_REGDMA_OFF;
  3429. }
  3430. ret = of_property_read_u32(dev->dev.of_node,
  3431. "qcom,mdss-highest-bank-bit", &data);
  3432. if (ret) {
  3433. SDEROT_DBG("default to A5X bank\n");
  3434. ret = 0;
  3435. hw_data->highest_bank = 2;
  3436. } else {
  3437. SDEROT_DBG("set highest bank bit to %d\n", data);
  3438. hw_data->highest_bank = data;
  3439. }
  3440. ret = of_property_read_u32(dev->dev.of_node,
  3441. "qcom,sde-ubwc-malsize", &data);
  3442. if (ret) {
  3443. ret = 0;
  3444. hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
  3445. } else {
  3446. SDEROT_DBG("set ubwc malsize to %d\n", data);
  3447. hw_data->ubwc_malsize = data;
  3448. }
  3449. ret = of_property_read_u32(dev->dev.of_node,
  3450. "qcom,sde-ubwc_swizzle", &data);
  3451. if (ret) {
  3452. ret = 0;
  3453. hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
  3454. } else {
  3455. SDEROT_DBG("set ubwc swizzle to %d\n", data);
  3456. hw_data->ubwc_swizzle = data;
  3457. }
  3458. ret = of_property_read_u32(dev->dev.of_node,
  3459. "qcom,mdss-sbuf-headroom", &data);
  3460. if (ret) {
  3461. ret = 0;
  3462. hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
  3463. } else {
  3464. SDEROT_DBG("set sbuf headroom to %d\n", data);
  3465. hw_data->sbuf_headroom = data;
  3466. }
  3467. ret = of_property_read_u32(dev->dev.of_node,
  3468. "qcom,mdss-rot-linewidth", &data);
  3469. if (ret) {
  3470. ret = 0;
  3471. hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
  3472. } else {
  3473. SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
  3474. hw_data->maxlinewidth = data;
  3475. }
  3476. return ret;
  3477. }
  3478. /*
  3479. * sde_rotator_r3_init - initialize the r3 module
  3480. * @mgr: Pointer to rotator manager
  3481. *
3482. * This function sets up the r3 callback functions, parses r3-specific
3483. * device tree settings, installs the r3-specific interrupt handler,
3484. * and initializes the r3 internal data structures.
  3485. */
  3486. int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
  3487. {
  3488. struct sde_hw_rotator *rot;
  3489. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  3490. int i;
  3491. int ret;
  3492. rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
  3493. if (!rot)
  3494. return -ENOMEM;
  3495. mgr->hw_data = rot;
  3496. mgr->queue_count = ROT_QUEUE_MAX;
  3497. rot->mdss_base = mdata->sde_io.base;
  3498. rot->pdev = mgr->pdev;
  3499. rot->koff_timeout = KOFF_TIMEOUT;
  3500. rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
  3501. rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
  3502. /* Assign ops */
  3503. mgr->ops_hw_destroy = sde_hw_rotator_destroy;
  3504. mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
  3505. mgr->ops_hw_free = sde_hw_rotator_free_ext;
  3506. mgr->ops_config_hw = sde_hw_rotator_config;
  3507. mgr->ops_cancel_hw = sde_hw_rotator_cancel;
  3508. mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
  3509. mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
  3510. mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
  3511. mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
  3512. mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
  3513. mgr->ops_hw_show_state = sde_hw_rotator_show_state;
  3514. mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
  3515. mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
  3516. mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
  3517. mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
  3518. mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
  3519. mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
  3520. mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
  3521. mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
  3522. ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
  3523. if (ret)
  3524. goto error_parse_dt;
  3525. rot->irq_num = -EINVAL;
  3526. atomic_set(&rot->irq_enabled, 0);
  3527. ret = sde_rotator_hw_rev_init(rot);
  3528. if (ret)
  3529. goto error_hw_rev_init;
  3530. setup_rotator_ops(&rot->ops, rot->mode,
  3531. test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map));
  3532. spin_lock_init(&rot->rotctx_lock);
  3533. spin_lock_init(&rot->rotisr_lock);
  3534. /* REGDMA initialization */
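/*
 * In ROT_REGDMA_OFF mode the command write pointers reference the SW
 * command queue in memory; in REGDMA mode they point into the REGDMA
 * command RAM, with high-priority contexts occupying the first half and
 * low-priority contexts the second. The "* 4" presumably converts the
 * segment size from 32-bit words to bytes for the iomem pointer.
 */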
  3535. if (rot->mode == ROT_REGDMA_OFF) {
  3536. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3537. rot->cmd_wr_ptr[0][i] = (char __iomem *)(
  3538. &rot->cmd_queue[
  3539. SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
  3540. } else {
  3541. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3542. rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
  3543. rot->mdss_base +
  3544. REGDMA_RAM_REGDMA_CMD_RAM +
  3545. SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
  3546. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3547. rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
  3548. rot->mdss_base +
  3549. REGDMA_RAM_REGDMA_CMD_RAM +
  3550. SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
  3551. (i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
  3552. }
  3553. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  3554. atomic_set(&rot->timestamp[i], 0);
  3555. INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
  3556. }
  3557. mdata->sde_rot_hw = rot;
  3558. return 0;
  3559. error_hw_rev_init:
  3560. devm_kfree(&mgr->pdev->dev, mgr->hw_data);
  3561. error_parse_dt:
  3562. return ret;
  3563. }