/* sde_rotator_r3.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
  6. #include <linux/platform_device.h>
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/file.h>
  10. #include <linux/delay.h>
  11. #include <linux/debugfs.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dma-buf.h>
  15. #include <linux/clk.h>
  16. #include <linux/clk/qcom.h>
  17. #include "sde_rotator_core.h"
  18. #include "sde_rotator_util.h"
  19. #include "sde_rotator_smmu.h"
  20. #include "sde_rotator_r3.h"
  21. #include "sde_rotator_r3_internal.h"
  22. #include "sde_rotator_r3_hwio.h"
  23. #include "sde_rotator_r3_debug.h"
  24. #include "sde_rotator_trace.h"
  25. #include "sde_rotator_debug.h"
  26. #include "sde_rotator_vbif.h"
  27. #define RES_UHD (3840*2160)
  28. #define MS_TO_US(t) ((t) * USEC_PER_MSEC)
  29. /* traffic shaping clock ticks = finish_time x 19.2MHz */
  30. #define TRAFFIC_SHAPE_CLKTICK_14MS 268800
  31. #define TRAFFIC_SHAPE_CLKTICK_12MS 230400
  32. #define TRAFFIC_SHAPE_VSYNC_CLK 19200000
  33. /* wait for at most 2 vsync for lowest refresh rate (24hz) */
  34. #define KOFF_TIMEOUT (42 * 8)
  35. /*
  36. * When in sbuf mode, select a much longer wait, to allow the other driver
  37. * to detect timeouts and abort if necessary.
  38. */
  39. #define KOFF_TIMEOUT_SBUF (10000)
  40. /* default stream buffer headroom in lines */
  41. #define DEFAULT_SBUF_HEADROOM 20
  42. #define DEFAULT_UBWC_MALSIZE 0
  43. #define DEFAULT_UBWC_SWIZZLE 0
  44. #define DEFAULT_MAXLINEWIDTH 4096
  45. /* stride alignment requirement for avoiding partial writes */
  46. #define PARTIAL_WRITE_ALIGNMENT 0x1F
  47. /* Macro for constructing the REGDMA command */
  48. #define SDE_REGDMA_WRITE(p, off, data) \
  49. do { \
  50. SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
  51. (u32)(data));\
  52. writel_relaxed_no_log( \
  53. (REGDMA_OP_REGWRITE | \
  54. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  55. p); \
  56. p += sizeof(u32); \
  57. writel_relaxed_no_log(data, p); \
  58. p += sizeof(u32); \
  59. } while (0)
  60. #define SDE_REGDMA_MODIFY(p, off, mask, data) \
  61. do { \
  62. SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
  63. (u32)(data));\
  64. writel_relaxed_no_log( \
  65. (REGDMA_OP_REGMODIFY | \
  66. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  67. p); \
  68. p += sizeof(u32); \
  69. writel_relaxed_no_log(mask, p); \
  70. p += sizeof(u32); \
  71. writel_relaxed_no_log(data, p); \
  72. p += sizeof(u32); \
  73. } while (0)
  74. #define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
  75. do { \
  76. SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
  77. (u32)(len));\
  78. writel_relaxed_no_log( \
  79. (REGDMA_OP_BLKWRITE_INC | \
  80. ((off) & REGDMA_ADDR_OFFSET_MASK)), \
  81. p); \
  82. p += sizeof(u32); \
  83. writel_relaxed_no_log(len, p); \
  84. p += sizeof(u32); \
  85. } while (0)
  86. #define SDE_REGDMA_BLKWRITE_DATA(p, data) \
  87. do { \
  88. SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
  89. writel_relaxed_no_log(data, p); \
  90. p += sizeof(u32); \
  91. } while (0)
  92. #define SDE_REGDMA_READ(p, data) \
  93. do { \
  94. data = readl_relaxed_no_log(p); \
  95. p += sizeof(u32); \
  96. } while (0)
  97. /* Macro for directly accessing mapped registers */
  98. #define SDE_ROTREG_WRITE(base, off, data) \
  99. do { \
  100. SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
  101. , (u32)(data));\
  102. writel_relaxed(data, (base + (off))); \
  103. } while (0)
  104. #define SDE_ROTREG_READ(base, off) \
  105. readl_relaxed(base + (off))
  106. #define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
  107. (((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
  108. static const u32 sde_hw_rotator_v3_inpixfmts[] = {
  109. SDE_PIX_FMT_XRGB_8888,
  110. SDE_PIX_FMT_ARGB_8888,
  111. SDE_PIX_FMT_ABGR_8888,
  112. SDE_PIX_FMT_RGBA_8888,
  113. SDE_PIX_FMT_BGRA_8888,
  114. SDE_PIX_FMT_RGBX_8888,
  115. SDE_PIX_FMT_BGRX_8888,
  116. SDE_PIX_FMT_XBGR_8888,
  117. SDE_PIX_FMT_RGBA_5551,
  118. SDE_PIX_FMT_ARGB_1555,
  119. SDE_PIX_FMT_ABGR_1555,
  120. SDE_PIX_FMT_BGRA_5551,
  121. SDE_PIX_FMT_BGRX_5551,
  122. SDE_PIX_FMT_RGBX_5551,
  123. SDE_PIX_FMT_XBGR_1555,
  124. SDE_PIX_FMT_XRGB_1555,
  125. SDE_PIX_FMT_ARGB_4444,
  126. SDE_PIX_FMT_RGBA_4444,
  127. SDE_PIX_FMT_BGRA_4444,
  128. SDE_PIX_FMT_ABGR_4444,
  129. SDE_PIX_FMT_RGBX_4444,
  130. SDE_PIX_FMT_XRGB_4444,
  131. SDE_PIX_FMT_BGRX_4444,
  132. SDE_PIX_FMT_XBGR_4444,
  133. SDE_PIX_FMT_RGB_888,
  134. SDE_PIX_FMT_BGR_888,
  135. SDE_PIX_FMT_RGB_565,
  136. SDE_PIX_FMT_BGR_565,
  137. SDE_PIX_FMT_Y_CB_CR_H2V2,
  138. SDE_PIX_FMT_Y_CR_CB_H2V2,
  139. SDE_PIX_FMT_Y_CR_CB_GH2V2,
  140. SDE_PIX_FMT_Y_CBCR_H2V2,
  141. SDE_PIX_FMT_Y_CRCB_H2V2,
  142. SDE_PIX_FMT_Y_CBCR_H1V2,
  143. SDE_PIX_FMT_Y_CRCB_H1V2,
  144. SDE_PIX_FMT_Y_CBCR_H2V1,
  145. SDE_PIX_FMT_Y_CRCB_H2V1,
  146. SDE_PIX_FMT_YCBYCR_H2V1,
  147. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  148. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  149. SDE_PIX_FMT_RGBA_8888_UBWC,
  150. SDE_PIX_FMT_RGBX_8888_UBWC,
  151. SDE_PIX_FMT_RGB_565_UBWC,
  152. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  153. SDE_PIX_FMT_RGBA_1010102,
  154. SDE_PIX_FMT_RGBX_1010102,
  155. SDE_PIX_FMT_ARGB_2101010,
  156. SDE_PIX_FMT_XRGB_2101010,
  157. SDE_PIX_FMT_BGRA_1010102,
  158. SDE_PIX_FMT_BGRX_1010102,
  159. SDE_PIX_FMT_ABGR_2101010,
  160. SDE_PIX_FMT_XBGR_2101010,
  161. SDE_PIX_FMT_RGBA_1010102_UBWC,
  162. SDE_PIX_FMT_RGBX_1010102_UBWC,
  163. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  164. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  165. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  166. };
  167. static const u32 sde_hw_rotator_v3_outpixfmts[] = {
  168. SDE_PIX_FMT_XRGB_8888,
  169. SDE_PIX_FMT_ARGB_8888,
  170. SDE_PIX_FMT_ABGR_8888,
  171. SDE_PIX_FMT_RGBA_8888,
  172. SDE_PIX_FMT_BGRA_8888,
  173. SDE_PIX_FMT_RGBX_8888,
  174. SDE_PIX_FMT_BGRX_8888,
  175. SDE_PIX_FMT_XBGR_8888,
  176. SDE_PIX_FMT_RGBA_5551,
  177. SDE_PIX_FMT_ARGB_1555,
  178. SDE_PIX_FMT_ABGR_1555,
  179. SDE_PIX_FMT_BGRA_5551,
  180. SDE_PIX_FMT_BGRX_5551,
  181. SDE_PIX_FMT_RGBX_5551,
  182. SDE_PIX_FMT_XBGR_1555,
  183. SDE_PIX_FMT_XRGB_1555,
  184. SDE_PIX_FMT_ARGB_4444,
  185. SDE_PIX_FMT_RGBA_4444,
  186. SDE_PIX_FMT_BGRA_4444,
  187. SDE_PIX_FMT_ABGR_4444,
  188. SDE_PIX_FMT_RGBX_4444,
  189. SDE_PIX_FMT_XRGB_4444,
  190. SDE_PIX_FMT_BGRX_4444,
  191. SDE_PIX_FMT_XBGR_4444,
  192. SDE_PIX_FMT_RGB_888,
  193. SDE_PIX_FMT_BGR_888,
  194. SDE_PIX_FMT_RGB_565,
  195. SDE_PIX_FMT_BGR_565,
  196. /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
  197. /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
  198. /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
  199. SDE_PIX_FMT_Y_CBCR_H2V2,
  200. SDE_PIX_FMT_Y_CRCB_H2V2,
  201. SDE_PIX_FMT_Y_CBCR_H1V2,
  202. SDE_PIX_FMT_Y_CRCB_H1V2,
  203. SDE_PIX_FMT_Y_CBCR_H2V1,
  204. SDE_PIX_FMT_Y_CRCB_H2V1,
  205. /* SDE_PIX_FMT_YCBYCR_H2V1 */
  206. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  207. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  208. SDE_PIX_FMT_RGBA_8888_UBWC,
  209. SDE_PIX_FMT_RGBX_8888_UBWC,
  210. SDE_PIX_FMT_RGB_565_UBWC,
  211. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  212. SDE_PIX_FMT_RGBA_1010102,
  213. SDE_PIX_FMT_RGBX_1010102,
  214. /* SDE_PIX_FMT_ARGB_2101010 */
  215. /* SDE_PIX_FMT_XRGB_2101010 */
  216. SDE_PIX_FMT_BGRA_1010102,
  217. SDE_PIX_FMT_BGRX_1010102,
  218. /* SDE_PIX_FMT_ABGR_2101010 */
  219. /* SDE_PIX_FMT_XBGR_2101010 */
  220. SDE_PIX_FMT_RGBA_1010102_UBWC,
  221. SDE_PIX_FMT_RGBX_1010102_UBWC,
  222. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  223. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  224. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  225. };
  226. static const u32 sde_hw_rotator_v4_inpixfmts[] = {
  227. SDE_PIX_FMT_XRGB_8888,
  228. SDE_PIX_FMT_ARGB_8888,
  229. SDE_PIX_FMT_ABGR_8888,
  230. SDE_PIX_FMT_RGBA_8888,
  231. SDE_PIX_FMT_BGRA_8888,
  232. SDE_PIX_FMT_RGBX_8888,
  233. SDE_PIX_FMT_BGRX_8888,
  234. SDE_PIX_FMT_XBGR_8888,
  235. SDE_PIX_FMT_RGBA_5551,
  236. SDE_PIX_FMT_ARGB_1555,
  237. SDE_PIX_FMT_ABGR_1555,
  238. SDE_PIX_FMT_BGRA_5551,
  239. SDE_PIX_FMT_BGRX_5551,
  240. SDE_PIX_FMT_RGBX_5551,
  241. SDE_PIX_FMT_XBGR_1555,
  242. SDE_PIX_FMT_XRGB_1555,
  243. SDE_PIX_FMT_ARGB_4444,
  244. SDE_PIX_FMT_RGBA_4444,
  245. SDE_PIX_FMT_BGRA_4444,
  246. SDE_PIX_FMT_ABGR_4444,
  247. SDE_PIX_FMT_RGBX_4444,
  248. SDE_PIX_FMT_XRGB_4444,
  249. SDE_PIX_FMT_BGRX_4444,
  250. SDE_PIX_FMT_XBGR_4444,
  251. SDE_PIX_FMT_RGB_888,
  252. SDE_PIX_FMT_BGR_888,
  253. SDE_PIX_FMT_RGB_565,
  254. SDE_PIX_FMT_BGR_565,
  255. SDE_PIX_FMT_Y_CB_CR_H2V2,
  256. SDE_PIX_FMT_Y_CR_CB_H2V2,
  257. SDE_PIX_FMT_Y_CR_CB_GH2V2,
  258. SDE_PIX_FMT_Y_CBCR_H2V2,
  259. SDE_PIX_FMT_Y_CRCB_H2V2,
  260. SDE_PIX_FMT_Y_CBCR_H1V2,
  261. SDE_PIX_FMT_Y_CRCB_H1V2,
  262. SDE_PIX_FMT_Y_CBCR_H2V1,
  263. SDE_PIX_FMT_Y_CRCB_H2V1,
  264. SDE_PIX_FMT_YCBYCR_H2V1,
  265. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  266. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  267. SDE_PIX_FMT_RGBA_8888_UBWC,
  268. SDE_PIX_FMT_RGBX_8888_UBWC,
  269. SDE_PIX_FMT_RGB_565_UBWC,
  270. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  271. SDE_PIX_FMT_RGBA_1010102,
  272. SDE_PIX_FMT_RGBX_1010102,
  273. SDE_PIX_FMT_ARGB_2101010,
  274. SDE_PIX_FMT_XRGB_2101010,
  275. SDE_PIX_FMT_BGRA_1010102,
  276. SDE_PIX_FMT_BGRX_1010102,
  277. SDE_PIX_FMT_ABGR_2101010,
  278. SDE_PIX_FMT_XBGR_2101010,
  279. SDE_PIX_FMT_RGBA_1010102_UBWC,
  280. SDE_PIX_FMT_RGBX_1010102_UBWC,
  281. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  282. SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
  283. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  284. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  285. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  286. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  287. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  288. SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
  289. SDE_PIX_FMT_XRGB_8888_TILE,
  290. SDE_PIX_FMT_ARGB_8888_TILE,
  291. SDE_PIX_FMT_ABGR_8888_TILE,
  292. SDE_PIX_FMT_XBGR_8888_TILE,
  293. SDE_PIX_FMT_RGBA_8888_TILE,
  294. SDE_PIX_FMT_BGRA_8888_TILE,
  295. SDE_PIX_FMT_RGBX_8888_TILE,
  296. SDE_PIX_FMT_BGRX_8888_TILE,
  297. SDE_PIX_FMT_RGBA_1010102_TILE,
  298. SDE_PIX_FMT_RGBX_1010102_TILE,
  299. SDE_PIX_FMT_ARGB_2101010_TILE,
  300. SDE_PIX_FMT_XRGB_2101010_TILE,
  301. SDE_PIX_FMT_BGRA_1010102_TILE,
  302. SDE_PIX_FMT_BGRX_1010102_TILE,
  303. SDE_PIX_FMT_ABGR_2101010_TILE,
  304. SDE_PIX_FMT_XBGR_2101010_TILE,
  305. };
  306. static const u32 sde_hw_rotator_v4_outpixfmts[] = {
  307. SDE_PIX_FMT_XRGB_8888,
  308. SDE_PIX_FMT_ARGB_8888,
  309. SDE_PIX_FMT_ABGR_8888,
  310. SDE_PIX_FMT_RGBA_8888,
  311. SDE_PIX_FMT_BGRA_8888,
  312. SDE_PIX_FMT_RGBX_8888,
  313. SDE_PIX_FMT_BGRX_8888,
  314. SDE_PIX_FMT_XBGR_8888,
  315. SDE_PIX_FMT_RGBA_5551,
  316. SDE_PIX_FMT_ARGB_1555,
  317. SDE_PIX_FMT_ABGR_1555,
  318. SDE_PIX_FMT_BGRA_5551,
  319. SDE_PIX_FMT_BGRX_5551,
  320. SDE_PIX_FMT_RGBX_5551,
  321. SDE_PIX_FMT_XBGR_1555,
  322. SDE_PIX_FMT_XRGB_1555,
  323. SDE_PIX_FMT_ARGB_4444,
  324. SDE_PIX_FMT_RGBA_4444,
  325. SDE_PIX_FMT_BGRA_4444,
  326. SDE_PIX_FMT_ABGR_4444,
  327. SDE_PIX_FMT_RGBX_4444,
  328. SDE_PIX_FMT_XRGB_4444,
  329. SDE_PIX_FMT_BGRX_4444,
  330. SDE_PIX_FMT_XBGR_4444,
  331. SDE_PIX_FMT_RGB_888,
  332. SDE_PIX_FMT_BGR_888,
  333. SDE_PIX_FMT_RGB_565,
  334. SDE_PIX_FMT_BGR_565,
  335. /* SDE_PIX_FMT_Y_CB_CR_H2V2 */
  336. /* SDE_PIX_FMT_Y_CR_CB_H2V2 */
  337. /* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
  338. SDE_PIX_FMT_Y_CBCR_H2V2,
  339. SDE_PIX_FMT_Y_CRCB_H2V2,
  340. SDE_PIX_FMT_Y_CBCR_H1V2,
  341. SDE_PIX_FMT_Y_CRCB_H1V2,
  342. SDE_PIX_FMT_Y_CBCR_H2V1,
  343. SDE_PIX_FMT_Y_CRCB_H2V1,
  344. /* SDE_PIX_FMT_YCBYCR_H2V1 */
  345. SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
  346. SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
  347. SDE_PIX_FMT_RGBA_8888_UBWC,
  348. SDE_PIX_FMT_RGBX_8888_UBWC,
  349. SDE_PIX_FMT_RGB_565_UBWC,
  350. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  351. SDE_PIX_FMT_RGBA_1010102,
  352. SDE_PIX_FMT_RGBX_1010102,
  353. SDE_PIX_FMT_ARGB_2101010,
  354. SDE_PIX_FMT_XRGB_2101010,
  355. SDE_PIX_FMT_BGRA_1010102,
  356. SDE_PIX_FMT_BGRX_1010102,
  357. SDE_PIX_FMT_ABGR_2101010,
  358. SDE_PIX_FMT_XBGR_2101010,
  359. SDE_PIX_FMT_RGBA_1010102_UBWC,
  360. SDE_PIX_FMT_RGBX_1010102_UBWC,
  361. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  362. SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
  363. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  364. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  365. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  366. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  367. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  368. SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
  369. SDE_PIX_FMT_XRGB_8888_TILE,
  370. SDE_PIX_FMT_ARGB_8888_TILE,
  371. SDE_PIX_FMT_ABGR_8888_TILE,
  372. SDE_PIX_FMT_XBGR_8888_TILE,
  373. SDE_PIX_FMT_RGBA_8888_TILE,
  374. SDE_PIX_FMT_BGRA_8888_TILE,
  375. SDE_PIX_FMT_RGBX_8888_TILE,
  376. SDE_PIX_FMT_BGRX_8888_TILE,
  377. SDE_PIX_FMT_RGBA_1010102_TILE,
  378. SDE_PIX_FMT_RGBX_1010102_TILE,
  379. SDE_PIX_FMT_ARGB_2101010_TILE,
  380. SDE_PIX_FMT_XRGB_2101010_TILE,
  381. SDE_PIX_FMT_BGRA_1010102_TILE,
  382. SDE_PIX_FMT_BGRX_1010102_TILE,
  383. SDE_PIX_FMT_ABGR_2101010_TILE,
  384. SDE_PIX_FMT_XBGR_2101010_TILE,
  385. };
  386. static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
  387. SDE_PIX_FMT_Y_CBCR_H2V2_P010,
  388. SDE_PIX_FMT_Y_CBCR_H2V2,
  389. SDE_PIX_FMT_Y_CRCB_H2V2,
  390. SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
  391. SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
  392. SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
  393. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  394. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  395. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  396. };
  397. static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
  398. SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
  399. SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
  400. SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
  401. };
  402. static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
  403. {0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
  404. {0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
  405. {0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
  406. };
  407. static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
  408. /*
  409. * rottop - 0xA8850
  410. */
  411. /* REGDMA */
  412. { 0XA8850, 0, 0 },
  413. { 0XA8850, 0, 1 },
  414. { 0XA8850, 0, 2 },
  415. { 0XA8850, 0, 3 },
  416. { 0XA8850, 0, 4 },
  417. /* ROT_WB */
  418. { 0XA8850, 1, 0 },
  419. { 0XA8850, 1, 1 },
  420. { 0XA8850, 1, 2 },
  421. { 0XA8850, 1, 3 },
  422. { 0XA8850, 1, 4 },
  423. { 0XA8850, 1, 5 },
  424. { 0XA8850, 1, 6 },
  425. { 0XA8850, 1, 7 },
  426. /* UBWC_DEC */
  427. { 0XA8850, 2, 0 },
  428. /* UBWC_ENC */
  429. { 0XA8850, 3, 0 },
  430. /* ROT_FETCH_0 */
  431. { 0XA8850, 4, 0 },
  432. { 0XA8850, 4, 1 },
  433. { 0XA8850, 4, 2 },
  434. { 0XA8850, 4, 3 },
  435. { 0XA8850, 4, 4 },
  436. { 0XA8850, 4, 5 },
  437. { 0XA8850, 4, 6 },
  438. { 0XA8850, 4, 7 },
  439. /* ROT_FETCH_1 */
  440. { 0XA8850, 5, 0 },
  441. { 0XA8850, 5, 1 },
  442. { 0XA8850, 5, 2 },
  443. { 0XA8850, 5, 3 },
  444. { 0XA8850, 5, 4 },
  445. { 0XA8850, 5, 5 },
  446. { 0XA8850, 5, 6 },
  447. { 0XA8850, 5, 7 },
  448. /* ROT_FETCH_2 */
  449. { 0XA8850, 6, 0 },
  450. { 0XA8850, 6, 1 },
  451. { 0XA8850, 6, 2 },
  452. { 0XA8850, 6, 3 },
  453. { 0XA8850, 6, 4 },
  454. { 0XA8850, 6, 5 },
  455. { 0XA8850, 6, 6 },
  456. { 0XA8850, 6, 7 },
  457. /* ROT_FETCH_3 */
  458. { 0XA8850, 7, 0 },
  459. { 0XA8850, 7, 1 },
  460. { 0XA8850, 7, 2 },
  461. { 0XA8850, 7, 3 },
  462. { 0XA8850, 7, 4 },
  463. { 0XA8850, 7, 5 },
  464. { 0XA8850, 7, 6 },
  465. { 0XA8850, 7, 7 },
  466. /* ROT_FETCH_4 */
  467. { 0XA8850, 8, 0 },
  468. { 0XA8850, 8, 1 },
  469. { 0XA8850, 8, 2 },
  470. { 0XA8850, 8, 3 },
  471. { 0XA8850, 8, 4 },
  472. { 0XA8850, 8, 5 },
  473. { 0XA8850, 8, 6 },
  474. { 0XA8850, 8, 7 },
  475. /* ROT_UNPACK_0*/
  476. { 0XA8850, 9, 0 },
  477. { 0XA8850, 9, 1 },
  478. { 0XA8850, 9, 2 },
  479. { 0XA8850, 9, 3 },
  480. };
  481. static struct sde_rot_regdump sde_rot_r3_regdump[] = {
  482. { "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
  483. { "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
  484. { "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
  485. { "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
  486. SDE_ROT_REGDUMP_READ },
  487. /*
  488. * Need to perform a SW reset to REGDMA in order to access the
  489. * REGDMA RAM especially if REGDMA is waiting for Rotator IDLE.
  490. * REGDMA RAM should be dump at last.
  491. */
  492. { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
  493. SDE_ROT_REGDUMP_WRITE, 1 },
  494. { "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
  495. SDE_ROT_REGDUMP_READ },
  496. { "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
  497. SDE_ROT_REGDUMP_VBIF },
  498. { "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
  499. SDE_ROT_REGDUMP_WRITE, 0 },
  500. };
/*
 * struct sde_rot_cdp_params - parameters for programming one CDP register
 * @enable: when false, the CDP register at @offset is written 0
 * @fmt:    pixel format; UBWC/tile formats enable extra CDP features
 * @offset: register offset the CDP configuration is written to
 */
struct sde_rot_cdp_params {
	bool enable;
	struct sde_mdp_format_params *fmt;
	u32 offset;
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID	(~0)
  508. /**
  509. * __sde_hw_rotator_get_timestamp - obtain rotator current timestamp
  510. * @rot: rotator context
  511. * @q_id: regdma queue id (low/high)
  512. * @return: current timestmap
  513. */
  514. static u32 __sde_hw_rotator_get_timestamp(struct sde_hw_rotator *rot, u32 q_id)
  515. {
  516. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  517. u32 ts;
  518. if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
  519. if (q_id == ROT_QUEUE_HIGH_PRIORITY)
  520. ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_0);
  521. else
  522. ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_1);
  523. } else {
  524. ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
  525. if (q_id == ROT_QUEUE_LOW_PRIORITY)
  526. ts >>= SDE_REGDMA_SWTS_SHIFT;
  527. }
  528. return ts & SDE_REGDMA_SWTS_MASK;
  529. }
  530. /**
  531. * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
  532. * @ts_curr: current software timestamp
  533. * @ts_prev: previous software timestamp
  534. * @return: the amount ts_curr is ahead of ts_prev
  535. */
  536. static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
  537. {
  538. u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;
  539. return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
  540. }
  541. /**
  542. * sde_hw_rotator_pending_hwts - Check if the given context is still pending
  543. * @rot: Pointer to hw rotator
  544. * @ctx: Pointer to rotator context
  545. * @phwts: Pointer to returned reference hw timestamp, optional
  546. * @return: true if context has pending requests
  547. */
  548. static int sde_hw_rotator_pending_hwts(struct sde_hw_rotator *rot,
  549. struct sde_hw_rotator_context *ctx, u32 *phwts)
  550. {
  551. u32 hwts;
  552. int ts_diff;
  553. bool pending;
  554. if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID) {
  555. if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
  556. hwts = SDE_ROTREG_READ(rot->mdss_base,
  557. ROTTOP_ROT_CNTR_1);
  558. else
  559. hwts = SDE_ROTREG_READ(rot->mdss_base,
  560. ROTTOP_ROT_CNTR_0);
  561. } else {
  562. hwts = ctx->last_regdma_timestamp;
  563. }
  564. hwts &= SDE_REGDMA_SWTS_MASK;
  565. ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, hwts);
  566. if (phwts)
  567. *phwts = hwts;
  568. pending = (ts_diff > 0) ? true : false;
  569. SDEROT_DBG("ts:0x%x, queue_id:%d, hwts:0x%x, pending:%d\n",
  570. ctx->timestamp, ctx->q_id, hwts, pending);
  571. SDEROT_EVTLOG(ctx->timestamp, hwts, ctx->q_id, ts_diff);
  572. return pending;
  573. }
  574. /**
  575. * sde_hw_rotator_update_hwts - update hw timestamp with given value
  576. * @rot: Pointer to hw rotator
  577. * @q_id: rotator queue id
  578. * @hwts: new hw timestamp
  579. */
  580. static void sde_hw_rotator_update_hwts(struct sde_hw_rotator *rot,
  581. u32 q_id, u32 hwts)
  582. {
  583. if (q_id == ROT_QUEUE_LOW_PRIORITY)
  584. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_1, hwts);
  585. else
  586. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_0, hwts);
  587. }
  588. /**
  589. * sde_hw_rotator_pending_swts - Check if the given context is still pending
  590. * @rot: Pointer to hw rotator
  591. * @ctx: Pointer to rotator context
  592. * @pswts: Pointer to returned reference software timestamp, optional
  593. * @return: true if context has pending requests
  594. */
  595. static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
  596. struct sde_hw_rotator_context *ctx, u32 *pswts)
  597. {
  598. u32 swts;
  599. int ts_diff;
  600. bool pending;
  601. if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
  602. swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
  603. else
  604. swts = ctx->last_regdma_timestamp;
  605. if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
  606. swts >>= SDE_REGDMA_SWTS_SHIFT;
  607. swts &= SDE_REGDMA_SWTS_MASK;
  608. ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);
  609. if (pswts)
  610. *pswts = swts;
  611. pending = (ts_diff > 0) ? true : false;
  612. SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
  613. ctx->timestamp, ctx->q_id, swts, pending);
  614. SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);
  615. return pending;
  616. }
  617. /**
  618. * sde_hw_rotator_update_swts - update software timestamp with given value
  619. * @rot: Pointer to hw rotator
  620. * @q_id: rotator queue id
  621. * @swts: new software timestamp
  622. */
  623. static void sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
  624. u32 q_id, u32 swts)
  625. {
  626. u32 mask = SDE_REGDMA_SWTS_MASK;
  627. swts &= SDE_REGDMA_SWTS_MASK;
  628. if (q_id == ROT_QUEUE_LOW_PRIORITY) {
  629. swts <<= SDE_REGDMA_SWTS_SHIFT;
  630. mask <<= SDE_REGDMA_SWTS_SHIFT;
  631. }
  632. swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
  633. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
  634. }
/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 *
 * On the first reference (count going 0 -> 1) any stale interrupt status
 * for the active mode (rotator-done vs regdma) is cleared before the irq
 * line is enabled; later calls only bump the reference count.
 * NOTE(review): the atomic_read/atomic_inc pair is not atomic as a unit --
 * presumably callers serialize enable/disable; confirm at call sites.
 */
static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		/* clear stale status so the first irq is not spurious */
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);
}
/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 * Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 *
 * Dropping the last reference masks the interrupt source for the active
 * mode, waits for any in-flight handler via synchronize_irq(), then
 * disables the irq line. Unbalanced calls are logged and ignored.
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
			atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		/* mask the source before tearing down the irq line */
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}
  679. static int sde_hw_rotator_halt_vbif_xin_client(void)
  680. {
  681. struct sde_mdp_vbif_halt_params halt_params;
  682. int rc = 0;
  683. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  684. memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
  685. halt_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
  686. halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  687. halt_params.bit_off_mdp_clk_ctrl =
  688. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
  689. sde_mdp_halt_vbif_xin(&halt_params);
  690. rc |= halt_params.xin_timeout;
  691. memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
  692. halt_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
  693. halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
  694. halt_params.bit_off_mdp_clk_ctrl =
  695. MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
  696. sde_mdp_halt_vbif_xin(&halt_params);
  697. rc |= halt_params.xin_timeout;
  698. return rc;
  699. }
/**
 * sde_hw_rotator_reset - Reset rotator hardware
 * @rot: pointer to hw rotator
 * @ctx: pointer to current rotator context during the hw hang (optional)
 *
 * Issues a sw reset override, forces the rotator into offline mode and
 * halts the VBIF xin clients. When @ctx is supplied, the queue timestamps
 * are rolled back to the oldest still-pending context on each queue and
 * every other active context is flagged and woken up so its waiters can
 * observe the reset.
 * Return: 0 on success, -EINVAL if @rot is NULL or ctx->q_id is invalid
 */
static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx)
{
	struct sde_hw_rotator_context *rctx = NULL;
	u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
			REGDMA_INT_2_MASK);
	u32 last_ts[ROT_QUEUE_MAX] = {0,};
	u32 latest_ts, opmode;
	int elapsed_time, t;
	int i, j;
	unsigned long flags;

	if (!rot) {
		SDEROT_ERR("NULL rotator\n");
		return -EINVAL;
	}

	/* sw reset the hw rotator */
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
	/* ensure write is issued to the rotator HW */
	wmb();
	usleep_range(MS_TO_US(10), MS_TO_US(20));

	/* force rotator into offline mode */
	opmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_OP_MODE,
			opmode & ~(BIT(5) | BIT(4) | BIT(1) | BIT(0)));

	/* release the sw reset override */
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);

	/* halt vbif xin client to ensure no pending transaction */
	sde_hw_rotator_halt_vbif_xin_client();

	/* if no ctx is specified, skip ctx wake up */
	if (!ctx)
		return 0;

	if (ctx->q_id >= ROT_QUEUE_MAX) {
		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&rot->rotisr_lock, flags);

	/* update timestamp register with current context */
	last_ts[ctx->q_id] = ctx->timestamp;
	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
	SDEROT_EVTLOG(ctx->timestamp);

	/*
	 * Search for any pending rot session, and look for last timestamp
	 * per hw queue.
	 */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		latest_ts = atomic_read(&rot->timestamp[i]);
		latest_ts &= SDE_REGDMA_SWTS_MASK;
		elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
			last_ts[i]);

		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx) {
				/* flag the context as interrupted by reset */
				rctx->last_regdma_isr_status = int_mask;
				rctx->last_regdma_timestamp = rctx->timestamp;

				/* keep the oldest pending timestamp so no
				 * still-waiting context is skipped over
				 */
				t = sde_hw_rotator_elapsed_swts(latest_ts,
							rctx->timestamp);
				if (t < elapsed_time) {
					elapsed_time = t;
					last_ts[i] = rctx->timestamp;
					rot->ops.update_ts(rot, i, last_ts[i]);
				}

				SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
						i, j, rctx->timestamp);
				SDEROT_EVTLOG(i, j, rctx->timestamp,
						last_ts[i]);
			}
		}
	}

	/* Finally wakeup all pending rotator context in queue */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx)
				wake_up_all(&rctx->regdma_waitq);
		}
	}

	spin_unlock_irqrestore(&rot->rotisr_lock, flags);

	return 0;
}
/**
 * _sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 * @ubwcerr: Optional out-pointer; when non-NULL it receives the raw value
 *           of the SSPP UBWC decode error status register
 *
 * Pure diagnostic: reads and logs REGDMA, ROTTOP, UBWC, VBIF halt and
 * SSPP/WB fifo status registers. No registers are written.
 */
static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
		u32 *ubwcerr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 reg = 0;

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts0/ts1 = %x/%x, q0_status = %x, q1_status = %x, block_status = %x\n",
		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_HIGH_PRIORITY),
		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_LOW_PRIORITY),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));

	/* latch the UBWC decode error for the caller before logging it */
	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
	if (ubwcerr)
		*ubwcerr = reg;

	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n", reg,
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));

	SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
			SDE_ROTREG_READ(rot->mdss_base,
				ROT_SSPP_FETCH_SMP_WR_PLANE0),
			SDE_ROTREG_READ(rot->mdss_base,
				ROT_SSPP_FETCH_SMP_WR_PLANE1),
			SDE_ROTREG_READ(rot->mdss_base,
				ROT_SSPP_FETCH_SMP_WR_PLANE2));
	SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_SMP_UNPACK_RD_PLANE0),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_SMP_UNPACK_RD_PLANE1),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_SMP_UNPACK_RD_PLANE2));
	SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_UNPACK_LINE_COUNT),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_UNPACK_BLK_COUNT),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_FILL_LEVELS));

	SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_WB_SBUF_STATUS_PLANE0),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_WB_SBUF_STATUS_PLANE1),
			SDE_ROTREG_READ(rot->mdss_base,
					ROT_WB_SYS_CACHE_MODE));
}
  858. /**
  859. * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
  860. * on provided session_id. Each rotator has a different session_id.
  861. * @rot: Pointer to rotator hw
  862. * @session_id: Identifier for rotator session
  863. * @sequence_id: Identifier for rotation request within the session
  864. * @q_id: Rotator queue identifier
  865. */
  866. static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
  867. struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
  868. enum sde_rot_queue_prio q_id)
  869. {
  870. int i;
  871. struct sde_hw_rotator_context *ctx = NULL;
  872. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
  873. ctx = rot->rotCtx[q_id][i];
  874. if (ctx && (ctx->session_id == session_id) &&
  875. (ctx->sequence_id == sequence_id)) {
  876. SDEROT_DBG(
  877. "rotCtx sloti[%d][%d] ==> ctx:%pK | session-id:%d | sequence-id:%d\n",
  878. q_id, i, ctx, ctx->session_id,
  879. ctx->sequence_id);
  880. return ctx;
  881. }
  882. }
  883. return NULL;
  884. }
  885. /*
  886. * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
  887. * @dbgbuf: Pointer to debug buffer
  888. * @buf: Pointer to layer buffer structure
  889. * @data: Pointer to h/w mapped buffer structure
  890. */
  891. static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
  892. struct sde_layer_buffer *buf, struct sde_mdp_data *data)
  893. {
  894. dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
  895. dbgbuf->buflen = data->p[0].srcp_dma_buf->size;
  896. dbgbuf->vaddr = NULL;
  897. dbgbuf->width = buf->width;
  898. dbgbuf->height = buf->height;
  899. if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
  900. dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
  901. dbgbuf->vaddr = dma_buf_kmap(dbgbuf->dmabuf, 0);
  902. SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
  903. dbgbuf->vaddr, dbgbuf->buflen,
  904. dbgbuf->width, dbgbuf->height);
  905. }
  906. }
  907. /*
  908. * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
  909. * @dbgbuf: Pointer to debug buffer
  910. */
  911. static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
  912. {
  913. if (dbgbuf->vaddr) {
  914. dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
  915. dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
  916. }
  917. dbgbuf->vaddr = NULL;
  918. dbgbuf->dmabuf = NULL;
  919. dbgbuf->buflen = 0;
  920. dbgbuf->width = 0;
  921. dbgbuf->height = 0;
  922. }
  923. static void sde_hw_rotator_vbif_rt_setting(void)
  924. {
  925. u32 reg_high, reg_shift, reg_val, reg_val_lvl, mask, vbif_qos;
  926. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  927. int i, j;
  928. vbif_lock(mdata->parent_pdev);
  929. for (i = 0; i < mdata->npriority_lvl; i++) {
  930. for (j = 0; j < MAX_XIN; j++) {
  931. reg_high = ((mdata->vbif_xin_id[j]
  932. & 0x8) >> 3) * 4 + (i * 8);
  933. reg_shift = mdata->vbif_xin_id[j] * 4;
  934. reg_val = SDE_VBIF_READ(mdata,
  935. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high);
  936. reg_val_lvl = SDE_VBIF_READ(mdata,
  937. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high);
  938. mask = 0x7 << (mdata->vbif_xin_id[j] * 4);
  939. vbif_qos = mdata->vbif_nrt_qos[i];
  940. reg_val &= ~mask;
  941. reg_val |= (vbif_qos << reg_shift) & mask;
  942. reg_val_lvl &= ~mask;
  943. reg_val_lvl |= (vbif_qos << reg_shift) & mask;
  944. SDE_VBIF_WRITE(mdata,
  945. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high,
  946. reg_val);
  947. SDE_VBIF_WRITE(mdata,
  948. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high,
  949. reg_val_lvl);
  950. }
  951. }
  952. vbif_unlock(mdata->parent_pdev);
  953. }
/*
 * sde_hw_rotator_vbif_setting - helper function to set vbif QoS remapper
 * levels, enable write gather enable and avoid clk gating setting for
 * debug purpose.
 *
 * @rot: Pointer to rotator hw
 *
 * Programs a 4-level or 8-level QoS remapper depending on
 * mdata->npriority_lvl, then enables write gather for the writeback xin
 * and optionally forces clocks on for debug.
 */
static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
{
	u32 i, mask, vbif_qos, reg_val = 0;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	/* VBIF_ROT QoS remapper setting */
	switch (mdata->npriority_lvl) {

	case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
		for (i = 0; i < mdata->npriority_lvl; i++) {
			reg_val = SDE_VBIF_READ(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
			/*
			 * NOTE(review): mask is computed but never applied to
			 * clear the field before the OR below -- looks like
			 * the code assumes those bits read back as zero;
			 * confirm against the VBIF programming guide.
			 */
			mask = 0x3 << (XIN_SSPP * 2);
			vbif_qos = mdata->vbif_nrt_qos[i];
			reg_val |= vbif_qos << (XIN_SSPP * 2);
			/* ensure write is issued after the read operation */
			mb();
			SDE_VBIF_WRITE(mdata,
					MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
					reg_val);
		}
		break;

	case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
		mask = mdata->npriority_lvl - 1;
		for (i = 0; i < mdata->npriority_lvl; i++) {
			/* RD and WR client */
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
							<< (XIN_SSPP * 4);
			reg_val |= (mdata->vbif_nrt_qos[i] & mask)
							<< (XIN_WRITEBACK * 4);

			SDE_VBIF_WRITE(mdata,
				MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
				reg_val);
			SDE_VBIF_WRITE(mdata,
				MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
				reg_val);
		}
		break;

	default:
		SDEROT_DBG("invalid vbif remapper levels\n");
	}

	/*
	 * Enable write gather for writeback to remove write gaps, which
	 * may hang AXI/BIMC/SDE.
	 */
	SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
			BIT(XIN_WRITEBACK));

	/*
	 * For debug purpose, disable clock gating, i.e. Clocks always on
	 */
	if (mdata->clk_always_on) {
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
		SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
				0xFFFF);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
	}
}
/*
 * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
 * @ctx: Pointer to rotator context
 * @mask: Bit mask location of the timestamp
 * @swts: Software timestamp
 *
 * Appends a regdma command sequence that performs a dummy 1x1 rotation
 * writing ctx->timestamp to ctx->ts_addr, then updates the selected half
 * of REGDMA_TIMESTAMP_REG and kicks the rotator. The register order of
 * this sequence is what the hw expects -- do not reorder.
 */
static void sde_hw_rotator_setup_timestamp_packet(
		struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
{
	char __iomem *wrptr;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/*
	 * Create a dummy packet write out to 1 location for timestamp
	 * generation.
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);	/* 1x1 source */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
	/* the timestamp itself is the "pixel" data written out */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
	/*
	 * Must clear secure buffer setting for SW timestamp because
	 * SW timstamp buffer allocation is always non-secure region.
	 */
	if (ctx->is_secure) {
		SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
	}
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
			(ctx->rot->highest_bank & 0x3) << 8);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
	/* update only the bits selected by @mask with @swts */
	SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
  1069. /*
  1070. * sde_hw_rotator_cdp_configs - configures the CDP registers
  1071. * @ctx: Pointer to rotator context
  1072. * @params: Pointer to parameters needed for CDP configs
  1073. */
  1074. static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
  1075. struct sde_rot_cdp_params *params)
  1076. {
  1077. int reg_val;
  1078. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1079. if (!params->enable) {
  1080. SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
  1081. goto end;
  1082. }
  1083. reg_val = BIT(0); /* enable cdp */
  1084. if (sde_mdp_is_ubwc_format(params->fmt))
  1085. reg_val |= BIT(1); /* enable UBWC meta cdp */
  1086. if (sde_mdp_is_ubwc_format(params->fmt)
  1087. || sde_mdp_is_tilea4x_format(params->fmt)
  1088. || sde_mdp_is_tilea5x_format(params->fmt))
  1089. reg_val |= BIT(2); /* enable tile amortize */
  1090. reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
  1091. SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
  1092. end:
  1093. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1094. }
/*
 * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the WRITEBACK rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 *
 * Selects the offline table (mdata->lut_cfg) or the inline/sbuf table
 * (mdata->inline_lut_cfg) based on ctx->sbuf_mode; each individual LUT
 * write is further gated by its capability bit in the corresponding map.
 */
static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT WR setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_WR].safe_lut);

	/* Inline rotation setting */
	} else {
		/* QOS LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
				mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
		}

		/* Danger LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
				mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);

		/* Safe LUT WR setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
				mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
/*
 * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
 * for the SSPP rotator for inline and offline rotation.
 *
 * @ctx: Pointer to rotator context
 *
 * Mirror of sde_hw_rotator_setup_qos_lut_wr() for the read (SSPP) side:
 * selects the offline or inline/sbuf LUT table by ctx->sbuf_mode, with
 * each write gated by its capability bit.
 */
static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Offline rotation setting */
	if (!ctx->sbuf_mode) {
		/* QOS LUT RD setting */
		if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
					mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
					mdata->lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
					mdata->lut_cfg[SDE_ROT_RD].safe_lut);

	/* inline rotation setting */
	} else {
		/* QOS LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
				mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
		}

		/* Danger LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
				mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);

		/* Safe LUT RD setting */
		if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
				mdata->sde_inline_qos_map))
			SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
				mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
  1195. static void sde_hw_rotator_setup_fetchengine_helper(
  1196. struct sde_hw_rot_sspp_cfg *cfg,
  1197. struct sde_rot_data_type *mdata,
  1198. struct sde_hw_rotator_context *ctx, char __iomem *wrptr,
  1199. u32 flags, u32 *width, u32 *height)
  1200. {
  1201. int i;
  1202. /*
  1203. * initialize start control trigger selection first
  1204. */
  1205. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
  1206. if (ctx->sbuf_mode)
  1207. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
  1208. ctx->start_ctrl);
  1209. else
  1210. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
  1211. }
  1212. /* source image setup */
  1213. if ((flags & SDE_ROT_FLAG_DEINTERLACE)
  1214. && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
  1215. for (i = 0; i < cfg->src_plane.num_planes; i++)
  1216. cfg->src_plane.ystride[i] *= 2;
  1217. *width *= 2;
  1218. *height /= 2;
  1219. }
  1220. }
  1221. /*
  1222. * sde_hw_rotator_setup_fetchengine - setup fetch engine
  1223. * @ctx: Pointer to rotator context
  1224. * @queue_id: Priority queue identifier
  1225. * @cfg: Fetch configuration
  1226. * @danger_lut: real-time QoS LUT for danger setting (not used)
  1227. * @safe_lut: real-time QoS LUT for safe setting (not used)
  1228. * @dnsc_factor_w: downscale factor for width
  1229. * @dnsc_factor_h: downscale factor for height
  1230. * @flags: Control flag
  1231. */
  1232. static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
  1233. enum sde_rot_queue_prio queue_id,
  1234. struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
  1235. u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
  1236. {
  1237. struct sde_hw_rotator *rot = ctx->rot;
  1238. struct sde_mdp_format_params *fmt;
  1239. struct sde_mdp_data *data;
  1240. struct sde_rot_cdp_params cdp_params = {0};
  1241. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1242. char __iomem *wrptr;
  1243. u32 opmode = 0;
  1244. u32 chroma_samp = 0;
  1245. u32 src_format = 0;
  1246. u32 unpack = 0;
  1247. u32 width = cfg->img_width;
  1248. u32 height = cfg->img_height;
  1249. u32 fetch_blocksize = 0;
  1250. int i;
  1251. if (ctx->rot->mode == ROT_REGDMA_ON) {
  1252. if (rot->irq_num >= 0)
  1253. SDE_ROTREG_WRITE(rot->mdss_base,
  1254. REGDMA_CSR_REGDMA_INT_EN,
  1255. REGDMA_INT_MASK);
  1256. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
  1257. REGDMA_EN);
  1258. }
  1259. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1260. sde_hw_rotator_setup_fetchengine_helper(cfg, mdata, ctx, wrptr,
  1261. flags, &width, &height);
  1262. /*
  1263. * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
  1264. */
  1265. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
  1266. /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
  1267. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1268. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1269. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
  1270. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1271. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1272. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1273. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1274. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1275. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1276. /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
  1277. data = cfg->data;
  1278. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1279. SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
  1280. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
  1281. (cfg->src_plane.ystride[1] << 16));
  1282. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
  1283. (cfg->src_plane.ystride[3] << 16));
  1284. /* UNUSED, write 0 */
  1285. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1286. /* setup source format */
  1287. fmt = cfg->fmt;
  1288. chroma_samp = fmt->chroma_sample;
  1289. if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
  1290. if (chroma_samp == SDE_MDP_CHROMA_H2V1)
  1291. chroma_samp = SDE_MDP_CHROMA_H1V2;
  1292. else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
  1293. chroma_samp = SDE_MDP_CHROMA_H2V1;
  1294. }
  1295. src_format = (chroma_samp << 23) |
  1296. (fmt->fetch_planes << 19) |
  1297. (fmt->bits[C3_ALPHA] << 6) |
  1298. (fmt->bits[C2_R_Cr] << 4) |
  1299. (fmt->bits[C1_B_Cb] << 2) |
  1300. (fmt->bits[C0_G_Y] << 0);
  1301. if (fmt->alpha_enable &&
  1302. (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
  1303. src_format |= BIT(8); /* SRCC3_EN */
  1304. src_format |= ((fmt->unpack_count - 1) << 12) |
  1305. (fmt->unpack_tight << 17) |
  1306. (fmt->unpack_align_msb << 18) |
  1307. ((fmt->bpp - 1) << 9) |
  1308. ((fmt->frame_format & 3) << 30);
  1309. if (flags & SDE_ROT_FLAG_ROT_90)
  1310. src_format |= BIT(11); /* ROT90 */
  1311. if (sde_mdp_is_ubwc_format(fmt))
  1312. opmode |= BIT(0); /* BWC_DEC_EN */
  1313. /* if this is YUV pixel format, enable CSC */
  1314. if (sde_mdp_is_yuv_format(fmt))
  1315. src_format |= BIT(15); /* SRC_COLOR_SPACE */
  1316. if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
  1317. src_format |= BIT(14); /* UNPACK_DX_FORMAT */
  1318. if (rot->solid_fill)
  1319. src_format |= BIT(22); /* SOLID_FILL */
  1320. /* SRC_FORMAT */
  1321. SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
  1322. /* setup source unpack pattern */
  1323. unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
  1324. (fmt->element[1] << 8) | (fmt->element[0] << 0);
  1325. /* SRC_UNPACK_PATTERN */
  1326. SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
  1327. /* setup source op mode */
  1328. if (flags & SDE_ROT_FLAG_FLIP_LR)
  1329. opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
  1330. if (flags & SDE_ROT_FLAG_FLIP_UD)
  1331. opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
  1332. opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
  1333. /* SRC_OP_MODE */
  1334. SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
  1335. /* setup source fetch config, TP10 uses different block size */
  1336. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
  1337. (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
  1338. if (sde_mdp_is_tp10_format(fmt))
  1339. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
  1340. else
  1341. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
  1342. } else {
  1343. if (sde_mdp_is_tp10_format(fmt))
  1344. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
  1345. else
  1346. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
  1347. }
  1348. if (rot->solid_fill)
  1349. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
  1350. rot->constant_color);
  1351. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
  1352. fetch_blocksize |
  1353. SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
  1354. ((rot->highest_bank & 0x3) << 18));
  1355. if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
  1356. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL,
  1357. ((ctx->rot->ubwc_malsize & 0x3) << 8) |
  1358. ((ctx->rot->highest_bank & 0x3) << 4) |
  1359. ((ctx->rot->ubwc_swizzle & 0x1) << 0));
  1360. else if (test_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map) ||
  1361. test_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map))
  1362. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(30));
  1363. /* setup source buffer plane security status */
  1364. if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
  1365. SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
  1366. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
  1367. ctx->is_secure = true;
  1368. } else {
  1369. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
  1370. ctx->is_secure = false;
  1371. }
  1372. /* Update command queue write ptr */
  1373. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1374. /* CDP register RD setting */
  1375. cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
  1376. mdata->enable_cdp[SDE_ROT_RD] : false;
  1377. cdp_params.fmt = fmt;
  1378. cdp_params.offset = ROT_SSPP_CDP_CNTL;
  1379. sde_hw_rotator_cdp_configs(ctx, &cdp_params);
  1380. /* QOS LUT/ Danger LUT/ Safe Lut WR setting */
  1381. sde_hw_rotator_setup_qos_lut_rd(ctx);
  1382. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1383. /*
  1384. * Determine if traffic shaping is required. Only enable traffic
  1385. * shaping when content is 4k@30fps. The actual traffic shaping
  1386. * bandwidth calculation is done in output setup.
  1387. */
  1388. if (((!ctx->sbuf_mode)
  1389. && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
  1390. && (cfg->fps <= 30)) {
  1391. SDEROT_DBG("Enable Traffic Shaper\n");
  1392. ctx->is_traffic_shaping = true;
  1393. } else {
  1394. SDEROT_DBG("Disable Traffic Shaper\n");
  1395. ctx->is_traffic_shaping = false;
  1396. }
  1397. /* Update command queue write ptr */
  1398. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1399. }
/*
 * sde_hw_rotator_setup_wbengine - setup writeback engine
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @cfg: Writeback configuration
 * @flags: Control flag
 *
 * Emits the REGDMA command stream that programs the writeback (WB) path:
 * destination format/pack pattern, destination buffer addresses and strides,
 * output ROI, downscale factor, partial-write policy, UBWC/bank settings,
 * CDP and QoS LUTs, and finally the top-level op mode. The commands are
 * appended to the context's REGDMA segment; they only take effect when the
 * segment is executed (regdma or CPU playback).
 */
static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id,
		struct sde_hw_rot_wb_cfg *cfg,
		u32 flags)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_mdp_format_params *fmt;
	struct sde_rot_cdp_params cdp_params = {0};
	char __iomem *wrptr;
	u32 pack = 0;
	u32 dst_format = 0;
	u32 no_partial_writes = 0;
	int i;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	fmt = cfg->fmt;

	/* setup WB DST format: chroma sampling, plane layout, per-component
	 * bit depths packed into the DST_FORMAT register value
	 */
	dst_format |= (fmt->chroma_sample << 23) |
		(fmt->fetch_planes << 19) |
		(fmt->bits[C3_ALPHA] << 6) |
		(fmt->bits[C2_R_Cr] << 4) |
		(fmt->bits[C1_B_Cb] << 2) |
		(fmt->bits[C0_G_Y] << 0);

	/* alpha control: enable the 4th component either for a real alpha
	 * channel or for RGB formats padded to 4 components (X channel);
	 * in the padded case force a constant alpha value of 0
	 */
	if (fmt->alpha_enable || (!fmt->is_yuv && (fmt->unpack_count == 4))) {
		dst_format |= BIT(8);
		if (!fmt->alpha_enable) {
			dst_format |= BIT(14);
			SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
		}
	}

	dst_format |= ((fmt->unpack_count - 1) << 12) |
		(fmt->unpack_tight << 17) |
		(fmt->unpack_align_msb << 18) |
		((fmt->bpp - 1) << 9) |
		((fmt->frame_format & 3) << 30);

	if (sde_mdp_is_yuv_format(fmt))
		dst_format |= BIT(15);

	if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
		dst_format |= BIT(21); /* PACK_DX_FORMAT */

	/*
	 * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
	 */
	SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);

	/* DST_FORMAT */
	SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);

	/* DST_OP_MODE: BIT(0) enables UBWC encode for compressed formats */
	if (sde_mdp_is_ubwc_format(fmt))
		SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
	else
		SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);

	/* DST_PACK_PATTERN */
	pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
		(fmt->element[1] << 8) | (fmt->element[0] << 0);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);

	/* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
		SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
			(cfg->dst_plane.ystride[1] << 16));
	SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
			(cfg->dst_plane.ystride[3] << 16));

	/* setup WB out image size and ROI */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
			cfg->img_width | (cfg->img_height << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
			cfg->dst_rect->w | (cfg->dst_rect->h << 16));
	SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
			cfg->dst_rect->x | (cfg->dst_rect->y << 16));

	/* mark destination buffer secure when session requires it */
	if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
	else
		SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);

	/*
	 * setup Downscale factor
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
			cfg->v_downscale_factor |
			(cfg->h_downscale_factor << 16));

	/* partial write check */
	if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
		no_partial_writes = BIT(10);

		/*
		 * For simplicity, don't disable partial writes if
		 * the ROI does not span the entire width of the
		 * output image, and require the total stride to
		 * also be properly aligned.
		 *
		 * This avoids having to determine the memory access
		 * alignment of the actual horizontal ROI on a per
		 * color format basis.
		 */
		if (sde_mdp_is_ubwc_format(fmt)) {
			no_partial_writes = 0x0;
		} else if (cfg->dst_rect->x ||
				cfg->dst_rect->w != cfg->img_width) {
			no_partial_writes = 0x0;
		} else {
			for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
				if (cfg->dst_plane.ystride[i] &
						PARTIAL_WRITE_ALIGNMENT)
					no_partial_writes = 0x0;
		}
	}

	/* write config setup for bank configuration */
	SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
			(ctx->rot->highest_bank & 0x3) << 8);

	if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
				((ctx->rot->ubwc_malsize & 0x3) << 8) |
				((ctx->rot->highest_bank & 0x3) << 4) |
				((ctx->rot->ubwc_swizzle & 0x1) << 0));

	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
		SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
				ctx->sys_cache_mode);

	/* ROTTOP op mode: BIT(0) starts block, BIT(1) selects 90-deg path */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
			(flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));

	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/* CDP register WR setting */
	cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
			mdata->enable_cdp[SDE_ROT_WR] : false;
	cdp_params.fmt = fmt;
	cdp_params.offset = ROT_WB_CDP_CNTL;
	sde_hw_rotator_cdp_configs(ctx, &cdp_params);

	/* QOS LUT/ Danger LUT/ Safe LUT WR setting */
	sde_hw_rotator_setup_qos_lut_wr(ctx);

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
	if (ctx->is_traffic_shaping || cfg->prefill_bw) {
		u32 bw;

		/*
		 * Target to finish in 12ms, and we need to set number of bytes
		 * per clock tick for traffic shaping.
		 * Each clock tick run @ 19.2MHz, so we need we know total of
		 * clock ticks in 12ms, i.e. 12ms/(1/19.2MHz) ==> 23040
		 * Finally, calculate the byte count per clock tick based on
		 * resolution, bpp and compression ratio.
		 */
		bw = cfg->dst_rect->w * cfg->dst_rect->h;

		if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
			bw = (bw * 3) / 2;
		else
			bw *= fmt->bpp;

		bw /= TRAFFIC_SHAPE_CLKTICK_12MS;

		/* use prefill bandwidth instead if specified */
		if (cfg->prefill_bw)
			bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
					TRAFFIC_SHAPE_VSYNC_CLK);

		/* clamp the per-tick byte budget to the 8-bit HW field */
		if (bw > 0xFF)
			bw = 0xFF;
		else if (bw == 0)
			bw = 1;

		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
				BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
		SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
	} else {
		SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
		SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);
}
/*
 * sde_hw_rotator_start_no_regdma - start non-regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 *
 * Plays back the REGDMA command segment with the CPU instead of the REGDMA
 * engine: the segment is parsed opcode-by-opcode and each encoded register
 * write is issued directly via writel_relaxed(). Returns the context
 * timestamp used to track this job's completion.
 */
static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_hw_rotator *rot = ctx->rot;
	char __iomem *wrptr;
	char __iomem *mem_rdptr;
	char __iomem *addr;
	u32 mask;
	u32 cmd0, cmd1, cmd2;
	u32 blksize;

	/*
	 * when regdma is not in use, the regdma segment is just a normal
	 * DRAM, and not an iomem.
	 */
	mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* arm rotator-done interrupt before kicking off, if IRQ is wired */
	if (rot->irq_num >= 0) {
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
		reinit_completion(&ctx->rot_comp);
		sde_hw_rotator_enable_irq(rot);
	}

	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
	/* Write all command stream to Rotator blocks */
	/* Rotator will start right away after command stream finish writing */
	while (mem_rdptr < wrptr) {
		u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);

		switch (op) {
		case REGDMA_OP_NOP:
			SDEROT_DBG("NOP\n");
			mem_rdptr += sizeof(u32);
			break;
		case REGDMA_OP_REGWRITE:
			/* cmd0 = register offset, cmd1 = value */
			SDE_REGDMA_READ(mem_rdptr, cmd0);
			SDE_REGDMA_READ(mem_rdptr, cmd1);
			SDEROT_DBG("REGW %6.6x %8.8x\n",
					cmd0 & REGDMA_ADDR_OFFSET_MASK,
					cmd1);
			addr = rot->mdss_base +
				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
			writel_relaxed(cmd1, addr);
			break;
		case REGDMA_OP_REGMODIFY:
			/* cmd0 = offset, cmd1 = AND mask, cmd2 = OR value */
			SDE_REGDMA_READ(mem_rdptr, cmd0);
			SDE_REGDMA_READ(mem_rdptr, cmd1);
			SDE_REGDMA_READ(mem_rdptr, cmd2);
			SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
					cmd0 & REGDMA_ADDR_OFFSET_MASK,
					cmd1, cmd2);
			addr = rot->mdss_base +
				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
			mask = cmd1;
			writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
					addr);
			break;
		case REGDMA_OP_BLKWRITE_SINGLE:
			/* stream of values all written to one register */
			SDE_REGDMA_READ(mem_rdptr, cmd0);
			SDE_REGDMA_READ(mem_rdptr, cmd1);
			SDEROT_DBG("BLKWS %6.6x %6.6x\n",
					cmd0 & REGDMA_ADDR_OFFSET_MASK,
					cmd1);
			addr = rot->mdss_base +
				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
			blksize = cmd1;
			while (blksize--) {
				SDE_REGDMA_READ(mem_rdptr, cmd0);
				SDEROT_DBG("DATA %8.8x\n", cmd0);
				writel_relaxed(cmd0, addr);
			}
			break;
		case REGDMA_OP_BLKWRITE_INC:
			/* stream of values, destination auto-increments */
			SDE_REGDMA_READ(mem_rdptr, cmd0);
			SDE_REGDMA_READ(mem_rdptr, cmd1);
			SDEROT_DBG("BLKWI %6.6x %6.6x\n",
					cmd0 & REGDMA_ADDR_OFFSET_MASK,
					cmd1);
			addr = rot->mdss_base +
				(cmd0 & REGDMA_ADDR_OFFSET_MASK);
			blksize = cmd1;
			while (blksize--) {
				SDE_REGDMA_READ(mem_rdptr, cmd0);
				SDEROT_DBG("DATA %8.8x\n", cmd0);
				writel_relaxed(cmd0, addr);
				addr += 4;
			}
			break;
		default:
			/* Other not supported OP mode
			 * Skip data for now for unrecognized OP mode
			 */
			SDEROT_DBG("UNDEFINED\n");
			mem_rdptr += sizeof(u32);
			break;
		}
	}
	SDEROT_DBG("END %d\n", ctx->timestamp);

	return ctx->timestamp;
}
/*
 * sde_hw_rotator_start_regdma - start regdma operation
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 *
 * Closes the context's REGDMA command segment with a ROT_START command and
 * submits it to the REGDMA queue matching @queue_id. For offline
 * multi-context mode (no interrupt-triggered timestamping) a second, small
 * segment is appended and submitted that updates the software timestamp
 * once the job finishes. Returns the context timestamp.
 */
static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot = ctx->rot;
	char __iomem *wrptr;
	u32 regdmaSlot;
	u32 offset;
	u32 length;
	u32 ts_length;
	u32 enableInt;
	u32 swts = 0;
	u32 mask = 0;
	u32 trig_sel;
	bool int_trigger = false;

	wrptr = sde_hw_rotator_get_regdma_segment(ctx);

	/* Enable HW timestamp if supported in rotator */
	if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
		SDE_REGDMA_MODIFY(wrptr, ROTTOP_ROT_CNTR_CTRL,
				~BIT(queue_id), BIT(queue_id));
		int_trigger = true;
	} else if (ctx->sbuf_mode) {
		int_trigger = true;
	}

	/*
	 * Last ROT command must be ROT_START before REGDMA start
	 */
	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	/*
	 * Start REGDMA with command offset and size
	 */
	regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
	/* segment length in 32-bit words */
	length = (wrptr - ctx->regdma_base) / 4;
	/* word offset of this segment within the REGDMA command RAM */
	offset = (ctx->regdma_base - (rot->mdss_base +
				REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
	/* pick interrupt line based on timestamp parity (2 ping-pong ints) */
	enableInt = ((ctx->timestamp & 1) + 1) << 30;
	/* inline (sbuf) jobs trigger on MDP flush, offline on SW start */
	trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
			REGDMA_CMD_TRIG_SEL_SW_START;

	SDEROT_DBG(
		"regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
		queue_id, regdmaSlot, enableInt, length, offset,
		ctx->timestamp);

	/* ensure the command packet is issued before the submit command */
	wmb();

	/* REGDMA submission for current context */
	if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
				(int_trigger ? enableInt : 0) | trig_sel |
				((length & 0x3ff) << 14) | offset);
		swts = ctx->timestamp;
		mask = ~SDE_REGDMA_SWTS_MASK;
	} else {
		SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
				(int_trigger ? enableInt : 0) | trig_sel |
				((length & 0x3ff) << 14) | offset);
		swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
		mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
	}
	SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);

	/* sw timestamp update can only be used in offline multi-context mode */
	if (!int_trigger) {
		/* Write timestamp after previous rotator job finished */
		sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
		offset += length;
		ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
		ts_length /= sizeof(u32);
		WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);

		/* ensure command packet is issue before the submit command */
		wmb();

		SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);

		if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
					enableInt | (ts_length << 14) | offset);
		} else {
			SDE_ROTREG_WRITE(rot->mdss_base,
					REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
					enableInt | (ts_length << 14) | offset);
		}
	}

	/* Update command queue write ptr */
	sde_hw_rotator_put_regdma_segment(ctx, wrptr);

	return ctx->timestamp;
}
/*
 * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (unused here)
 *
 * Waits for the rotator-done event, either by blocking on the completion
 * signalled from the ISR (when an IRQ line is available) or by polling
 * ROTTOP_STATUS with udelay (up to 200 * 500us = 100ms). Returns 0 on
 * success, or -ENODEV (as u32) when the error bit is set.
 */
static u32 sde_hw_rotator_wait_done_no_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	u32 sts = 0;
	u32 status;
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for Rotator completion\n");
		rc = wait_for_completion_timeout(&ctx->rot_comp,
				ctx->sbuf_mode ?
				msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
				msecs_to_jiffies(rot->koff_timeout));

		spin_lock_irqsave(&rot->rotisr_lock, flags);
		status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		if (rc == 0) {
			/*
			 * Timeout, there might be error,
			 * or rotator still busy
			 */
			if (status & ROT_BUSY_BIT)
				SDEROT_ERR(
					"Timeout waiting for rotator done\n");
			else if (status & ROT_ERROR_BIT)
				SDEROT_ERR(
					"Rotator report error status\n");
			else
				SDEROT_WARN(
					"Timeout waiting, but rotator job is done!!\n");

			sde_hw_rotator_disable_irq(rot);
		}
		spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	} else {
		/* no IRQ available: poll the status register */
		int cnt = 200;

		do {
			udelay(500);
			status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
			cnt--;
		} while ((cnt > 0) && (status & ROT_BUSY_BIT)
				&& ((status & ROT_ERROR_BIT) == 0));

		if (status & ROT_ERROR_BIT)
			SDEROT_ERR("Rotator error\n");
		else if (status & ROT_BUSY_BIT)
			SDEROT_ERR("Rotator busy\n");

		/* ack the done interrupt status even in polling mode */
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
	}

	sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;

	return sts;
}
/*
 * sde_hw_rotator_wait_done_regdma - wait for regdma completion
 * @ctx: Pointer to rotator context
 * @queue_id: Priority queue identifier
 * @flag: Option flag (unused here)
 *
 * Waits until this context's timestamp is no longer pending, via the
 * regdma waitqueue (IRQ mode) or by polling (no IRQ). On timeout, error
 * interrupt status, or abort, performs error recovery: reads the UBWC
 * error status and HW timestamps, and resets the rotator block when a
 * UBWC decode error, abort, or stuck VBIF client is detected. Returns 0
 * on success or -ENODEV (as u32) on error/incomplete command.
 */
static u32 sde_hw_rotator_wait_done_regdma(
		struct sde_hw_rotator_context *ctx,
		enum sde_rot_queue_prio queue_id, u32 flag)
{
	struct sde_hw_rotator *rot = ctx->rot;
	int rc = 0;
	bool timeout = false;
	bool pending;
	bool abort;
	u32 status;
	u32 last_isr;
	u32 last_ts;
	u32 int_id;
	u32 swts;
	u32 sts = 0;
	u32 ubwcerr;
	u32 hwts[ROT_QUEUE_MAX];
	unsigned long flags;

	if (rot->irq_num >= 0) {
		SDEROT_DBG("Wait for REGDMA completion, ctx:%pK, ts:%X\n",
				ctx, ctx->timestamp);
		rc = wait_event_timeout(ctx->regdma_waitq,
				!rot->ops.get_pending_ts(rot, ctx, &swts),
				ctx->sbuf_mode ?
				msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
				msecs_to_jiffies(rot->koff_timeout));

		ATRACE_INT("sde_rot_done", 0);
		spin_lock_irqsave(&rot->rotisr_lock, flags);

		/* snapshot ISR state recorded by the interrupt handler */
		last_isr = ctx->last_regdma_isr_status;
		last_ts = ctx->last_regdma_timestamp;
		abort = ctx->abort;
		status = last_isr & REGDMA_INT_MASK;
		int_id = last_ts & 1;
		SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
				status, int_id, last_ts);

		if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
			timeout = true;
			pending = rot->ops.get_pending_ts(rot, ctx, &swts);

			/* cache ubwcerr and hw timestamps while locked */
			ubwcerr = SDE_ROTREG_READ(rot->mdss_base,
					ROT_SSPP_UBWC_ERROR_STATUS);

			hwts[ROT_QUEUE_HIGH_PRIORITY] =
				__sde_hw_rotator_get_timestamp(rot,
						ROT_QUEUE_HIGH_PRIORITY);
			hwts[ROT_QUEUE_LOW_PRIORITY] =
				__sde_hw_rotator_get_timestamp(rot,
						ROT_QUEUE_LOW_PRIORITY);

			/* drop lock: reset/halt below may sleep or take time */
			spin_unlock_irqrestore(&rot->rotisr_lock, flags);

			if (ubwcerr || abort ||
					sde_hw_rotator_halt_vbif_xin_client()) {
				/*
				 * Perform recovery for ROT SSPP UBWC decode
				 * error.
				 * - SW reset rotator hw block
				 * - reset TS logic so all pending rotation
				 *   in hw queue got done signalled
				 */
				if (!sde_hw_rotator_reset(rot, ctx))
					status = REGDMA_INCOMPLETE_CMD;
				else
					status = ROT_ERROR_BIT;
			} else {
				status = ROT_ERROR_BIT;
			}

			/* re-acquire: unlocked unconditionally below */
			spin_lock_irqsave(&rot->rotisr_lock, flags);
		} else {
			if (rc == 1)
				SDEROT_WARN(
					"REGDMA done but no irq, ts:0x%X/0x%X\n",
					ctx->timestamp, swts);
			status = 0;
		}

		spin_unlock_irqrestore(&rot->rotisr_lock, flags);

		/* dump rot status after releasing lock if timeout occurred */
		if (timeout) {
			SDEROT_ERR(
				"TIMEOUT, ts:0x%X/0x%X, pending:%d, abort:%d\n",
				ctx->timestamp, swts, pending, abort);
			SDEROT_ERR(
				"Cached: HW ts0/ts1 = %x/%x, ubwcerr = %x\n",
				hwts[ROT_QUEUE_HIGH_PRIORITY],
				hwts[ROT_QUEUE_LOW_PRIORITY], ubwcerr);

			if (status & REGDMA_WATCHDOG_INT)
				SDEROT_ERR("REGDMA watchdog interrupt\n");
			else if (status & REGDMA_INVALID_DESCRIPTOR)
				SDEROT_ERR("REGDMA invalid descriptor\n");
			else if (status & REGDMA_INCOMPLETE_CMD)
				SDEROT_ERR("REGDMA incomplete command\n");
			else if (status & REGDMA_INVALID_CMD)
				SDEROT_ERR("REGDMA invalid command\n");

			_sde_hw_rotator_dump_status(rot, &ubwcerr);
		}
	} else {
		/* no IRQ: poll pending timestamp / ISR status */
		int cnt = 200;
		bool pending;

		do {
			udelay(500);
			last_isr = SDE_ROTREG_READ(rot->mdss_base,
					REGDMA_CSR_REGDMA_INT_STATUS);
			pending = rot->ops.get_pending_ts(rot, ctx, &swts);
			cnt--;
		} while ((cnt > 0) && pending &&
				((last_isr & REGDMA_INT_ERR_MASK) == 0));

		if (last_isr & REGDMA_INT_ERR_MASK) {
			SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
					ctx->timestamp, swts, last_isr);
			_sde_hw_rotator_dump_status(rot, NULL);
			status = ROT_ERROR_BIT;
		} else if (pending) {
			SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
					ctx->timestamp, swts, last_isr);
			_sde_hw_rotator_dump_status(rot, NULL);
			status = ROT_ERROR_BIT;
		} else {
			status = 0;
		}

		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
				last_isr);
	}

	sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;

	if (status & ROT_ERROR_BIT)
		SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
				"vbif_dbg_bus", "panic");

	return sts;
}
  1956. /*
  1957. * setup_rotator_ops - setup callback functions for the low-level HAL
  1958. * @ops: Pointer to low-level ops callback
  1959. * @mode: Operation mode (non-regdma or regdma)
  1960. * @use_hwts: HW timestamp support mode
  1961. */
  1962. static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
  1963. enum sde_rotator_regdma_mode mode,
  1964. bool use_hwts)
  1965. {
  1966. ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
  1967. ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
  1968. if (mode == ROT_REGDMA_ON) {
  1969. ops->start_rotator = sde_hw_rotator_start_regdma;
  1970. ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
  1971. } else {
  1972. ops->start_rotator = sde_hw_rotator_start_no_regdma;
  1973. ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
  1974. }
  1975. if (use_hwts) {
  1976. ops->get_pending_ts = sde_hw_rotator_pending_hwts;
  1977. ops->update_ts = sde_hw_rotator_update_hwts;
  1978. } else {
  1979. ops->get_pending_ts = sde_hw_rotator_pending_swts;
  1980. ops->update_ts = sde_hw_rotator_update_swts;
  1981. }
  1982. }
  1983. /*
  1984. * sde_hw_rotator_swts_create - create software timestamp buffer
  1985. * @rot: Pointer to rotator hw
  1986. *
  1987. * This buffer is used by regdma to keep track of last completed command.
  1988. */
  1989. static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
  1990. {
  1991. int rc = 0;
  1992. struct sde_mdp_img_data *data;
  1993. u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
  1994. if (bufsize < SZ_4K)
  1995. bufsize = SZ_4K;
  1996. data = &rot->swts_buf;
  1997. data->len = bufsize;
  1998. data->srcp_dma_buf = sde_rot_get_dmabuf(data);
  1999. if (!data->srcp_dma_buf) {
  2000. SDEROT_ERR("Fail dmabuf create\n");
  2001. return -ENOMEM;
  2002. }
  2003. sde_smmu_ctrl(1);
  2004. data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
  2005. &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
  2006. if (IS_ERR_OR_NULL(data->srcp_attachment)) {
  2007. SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
  2008. rc = -ENOMEM;
  2009. goto err_put;
  2010. }
  2011. data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
  2012. DMA_BIDIRECTIONAL);
  2013. if (IS_ERR_OR_NULL(data->srcp_table)) {
  2014. SDEROT_ERR("dma_buf_map_attachment error\n");
  2015. rc = -ENOMEM;
  2016. goto err_detach;
  2017. }
  2018. rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
  2019. SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
  2020. &data->len, DMA_BIDIRECTIONAL);
  2021. if (rc < 0) {
  2022. SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
  2023. goto err_unmap;
  2024. }
  2025. data->mapped = true;
  2026. SDEROT_DBG("swts buffer mapped: %pad/%lx va:%pK\n", &data->addr,
  2027. data->len, rot->swts_buffer);
  2028. sde_smmu_ctrl(0);
  2029. return rc;
  2030. err_unmap:
  2031. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2032. DMA_FROM_DEVICE);
  2033. err_detach:
  2034. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2035. err_put:
  2036. data->srcp_dma_buf = NULL;
  2037. sde_smmu_ctrl(0);
  2038. return rc;
  2039. }
  2040. /*
  2041. * sde_hw_rotator_swts_destroy - destroy software timestamp buffer
  2042. * @rot: Pointer to rotator hw
  2043. */
  2044. static void sde_hw_rotator_swts_destroy(struct sde_hw_rotator *rot)
  2045. {
  2046. struct sde_mdp_img_data *data;
  2047. data = &rot->swts_buf;
  2048. sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
  2049. DMA_FROM_DEVICE, data->srcp_dma_buf);
  2050. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2051. DMA_FROM_DEVICE);
  2052. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2053. dma_buf_put(data->srcp_dma_buf);
  2054. data->addr = 0;
  2055. data->srcp_dma_buf = NULL;
  2056. data->srcp_attachment = NULL;
  2057. data->mapped = false;
  2058. }
/*
 * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
 *                              PM event occurs
 * @mgr: Pointer to rotator manager
 * @pmon: Boolean indicate an on/off power event
 *
 * On a power-off event (@pmon == false), cross-checks the SW timestamps
 * against the HW timestamps and block status registers. A mismatch with a
 * busy HW indicates a hung or unfinished job: log/dump state and, for
 * busy-but-consistent cases, reset the rotator before power goes down.
 */
void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
{
	struct sde_hw_rotator *rot;
	u32 l_ts, h_ts, l_hwts, h_hwts;
	u32 rotsts, regdmasts, rotopmode;

	/*
	 * Check last HW timestamp with SW timestamp before power off event.
	 * If there is a mismatch, that will be quite possible the rotator HW
	 * is either hang or not finishing last submitted job. In that case,
	 * it is best to do a timeout eventlog to capture some good events
	 * log data for analysis.
	 */
	if (!pmon && mgr && mgr->hw_data) {
		rot = mgr->hw_data;
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]) &
				SDE_REGDMA_SWTS_MASK;
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]) &
				SDE_REGDMA_SWTS_MASK;

		/* Need to turn on clock to access rotator register */
		sde_rotator_clk_ctrl(mgr, true);
		l_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_LOW_PRIORITY);
		h_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_HIGH_PRIORITY);
		regdmasts = SDE_ROTREG_READ(rot->mdss_base,
				REGDMA_CSR_REGDMA_BLOCK_STATUS);
		rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
		rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);

		SDEROT_DBG(
			"swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				l_ts, h_ts, l_hwts, h_hwts,
				regdmasts, rotsts);
		SDEROT_EVTLOG(l_ts, h_ts, l_hwts, h_hwts, regdmasts, rotsts);

		/* SW/HW timestamp mismatch with busy HW: likely hung job */
		if (((l_ts != l_hwts) || (h_ts != h_hwts)) &&
				((regdmasts & REGDMA_BUSY) ||
				 (rotsts & ROT_STATUS_MASK))) {
			SDEROT_ERR(
				"Mismatch SWTS with HWTS: swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
				l_ts, h_ts, l_hwts, h_hwts,
				regdmasts, rotsts);
			_sde_hw_rotator_dump_status(rot, NULL);
			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
					"vbif_dbg_bus", "panic");
		} else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
				((regdmasts & REGDMA_BUSY) ||
				 (rotsts & ROT_BUSY_BIT))) {
			/*
			 * rotator can stuck in inline while mdp is detached
			 */
			SDEROT_WARN(
				"Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
				regdmasts, rotsts, rotopmode);
			sde_hw_rotator_reset(rot, NULL);
		} else if ((regdmasts & REGDMA_BUSY) ||
				(rotsts & ROT_BUSY_BIT)) {
			/* offline path still busy: dump and reset */
			_sde_hw_rotator_dump_status(rot, NULL);
			SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
					"vbif_dbg_bus", "panic");
			sde_hw_rotator_reset(rot, NULL);
		}

		/* Turn off rotator clock after checking rotator registers */
		sde_rotator_clk_ctrl(mgr, false);
	}
}
  2129. /*
  2130. * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
  2131. * PM event occurs
  2132. * @mgr: Pointer to rotator manager
  2133. * @pmon: Boolean indicate an on/off power event
  2134. */
  2135. void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
  2136. {
  2137. struct sde_hw_rotator *rot;
  2138. u32 l_ts, h_ts;
  2139. /*
  2140. * After a power on event, the rotator HW is reset to default setting.
  2141. * It is necessary to synchronize the SW timestamp with the HW.
  2142. */
  2143. if (pmon && mgr && mgr->hw_data) {
  2144. rot = mgr->hw_data;
  2145. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
  2146. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
  2147. SDEROT_DBG("h_ts:0x%x, l_ts;0x%x\n", h_ts, l_ts);
  2148. SDEROT_EVTLOG(h_ts, l_ts);
  2149. rot->reset_hw_ts = true;
  2150. rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] =
  2151. l_ts & SDE_REGDMA_SWTS_MASK;
  2152. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] =
  2153. h_ts & SDE_REGDMA_SWTS_MASK;
  2154. }
  2155. }
  2156. /*
  2157. * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
  2158. * @mgr: Pointer to rotator manager
  2159. */
static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot;

	if (!mgr || !mgr->pdev || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return;
	}

	rot = mgr->hw_data;

	/* release the IRQ before freeing state it may reference */
	if (rot->irq_num >= 0)
		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);

	/* SW timestamp buffer exists only for regdma without HW timestamp */
	if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
			rot->mode == ROT_REGDMA_ON)
		sde_hw_rotator_swts_destroy(rot);

	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
	mgr->hw_data = NULL;
}
  2177. /*
  2178. * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
  2179. * @mgr: Pointer to rotator manager
  2180. * @pipe_id: pipe identifier (not used)
  2181. * @wb_id: writeback identifier/priority queue identifier
  2182. *
  2183. * This function allocates a new hw rotator resource for the given priority.
  2184. */
  2185. static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
  2186. struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
  2187. {
  2188. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2189. struct sde_hw_rotator_resource_info *resinfo;
  2190. if (!mgr || !mgr->hw_data) {
  2191. SDEROT_ERR("null parameters\n");
  2192. return NULL;
  2193. }
  2194. /*
  2195. * Allocate rotator resource info. Each allocation is per
  2196. * HW priority queue
  2197. */
  2198. resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
  2199. if (!resinfo) {
  2200. SDEROT_ERR("Failed allocation HW rotator resource info\n");
  2201. return NULL;
  2202. }
  2203. resinfo->rot = mgr->hw_data;
  2204. resinfo->hw.wb_id = wb_id;
  2205. atomic_set(&resinfo->hw.num_active, 0);
  2206. init_waitqueue_head(&resinfo->hw.wait_queue);
  2207. /* For non-regdma, only support one active session */
  2208. if (resinfo->rot->mode == ROT_REGDMA_OFF)
  2209. resinfo->hw.max_active = 1;
  2210. else {
  2211. resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
  2212. if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
  2213. (!resinfo->rot->swts_buf.mapped))
  2214. sde_hw_rotator_swts_create(resinfo->rot);
  2215. }
  2216. if (resinfo->rot->irq_num >= 0)
  2217. sde_hw_rotator_enable_irq(resinfo->rot);
  2218. SDEROT_DBG("New rotator resource:%pK, priority:%d\n",
  2219. resinfo, wb_id);
  2220. return &resinfo->hw;
  2221. }
  2222. /*
  2223. * sde_hw_rotator_free_ext - free the given rotator resource
  2224. * @mgr: Pointer to rotator manager
  2225. * @hw: Pointer to rotator resource
  2226. */
  2227. static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
  2228. struct sde_rot_hw_resource *hw)
  2229. {
  2230. struct sde_hw_rotator_resource_info *resinfo;
  2231. if (!mgr || !mgr->hw_data)
  2232. return;
  2233. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2234. SDEROT_DBG(
  2235. "Free rotator resource:%pK, priority:%d, active:%d, pending:%d\n",
  2236. resinfo, hw->wb_id, atomic_read(&hw->num_active),
  2237. hw->pending_count);
  2238. if (resinfo->rot->irq_num >= 0)
  2239. sde_hw_rotator_disable_irq(resinfo->rot);
  2240. devm_kfree(&mgr->pdev->dev, resinfo);
  2241. }
  2242. /*
  2243. * sde_hw_rotator_alloc_rotctx - allocate rotator context
  2244. * @rot: Pointer to rotator hw
  2245. * @hw: Pointer to rotator resource
  2246. * @session_id: Session identifier of this context
  2247. * @sequence_id: Sequence identifier of this request
  2248. * @sbuf_mode: true if stream buffer is requested
  2249. *
  2250. * This function allocates a new rotator context for the given session id.
  2251. */
static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
		struct sde_hw_rotator *rot,
		struct sde_rot_hw_resource *hw,
		u32 session_id,
		u32 sequence_id,
		bool sbuf_mode)
{
	struct sde_hw_rotator_context *ctx;

	/* Allocate rotator context */
	ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDEROT_ERR("Failed allocation HW rotator context\n");
		return NULL;
	}

	ctx->rot = rot;
	ctx->q_id = hw->wb_id;
	ctx->session_id = session_id;
	ctx->sequence_id = sequence_id;
	ctx->hwres = hw;
	/* take the next SW timestamp for this queue, wrapped to TS width */
	ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
	ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
	ctx->is_secure = false;
	ctx->sbuf_mode = sbuf_mode;
	INIT_LIST_HEAD(&ctx->list);

	/* regdma command buffer slot for this queue/context index */
	ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
			[sde_hw_rotator_get_regdma_ctxidx(ctx)];
	ctx->regdma_wrptr = ctx->regdma_base;

	/* per-context slot inside the SW timestamp buffer */
	ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
			ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
			sde_hw_rotator_get_regdma_ctxidx(ctx));

	ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;

	init_completion(&ctx->rot_comp);
	init_waitqueue_head(&ctx->regdma_waitq);

	/* Store rotator context for lookup purpose */
	sde_hw_rotator_put_ctx(ctx);

	SDEROT_DBG(
		"New rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
		ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
		ctx->q_id, ctx->timestamp,
		atomic_read(&ctx->hwres->num_active),
		ctx->sbuf_mode);

	return ctx;
}
  2295. /*
  2296. * sde_hw_rotator_free_rotctx - free the given rotator context
  2297. * @rot: Pointer to rotator hw
  2298. * @ctx: Pointer to rotator context
  2299. */
  2300. static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
  2301. struct sde_hw_rotator_context *ctx)
  2302. {
  2303. if (!rot || !ctx)
  2304. return;
  2305. SDEROT_DBG(
  2306. "Free rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
  2307. ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
  2308. ctx->q_id, ctx->timestamp,
  2309. atomic_read(&ctx->hwres->num_active),
  2310. ctx->sbuf_mode);
  2311. /* Clear rotator context from lookup purpose */
  2312. sde_hw_rotator_clr_ctx(ctx);
  2313. devm_kfree(&rot->pdev->dev, ctx);
  2314. }
  2315. /*
  2316. * sde_hw_rotator_config - configure hw for the given rotation entry
  2317. * @hw: Pointer to rotator resource
  2318. * @entry: Pointer to rotation entry
  2319. *
  2320. * This function setup the fetch/writeback/rotator blocks, as well as VBIF
  2321. * based on the given rotation entry.
  2322. */
static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	struct sde_hw_rot_sspp_cfg sspp_cfg;
	struct sde_hw_rot_wb_cfg wb_cfg;
	u32 danger_lut = 0;	/* applicable for realtime client only */
	u32 safe_lut = 0;	/* applicable for realtime client only */
	u32 flags = 0;
	u32 rststs = 0;
	struct sde_rotation_item *item;
	int ret;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;
	item = &entry->item;

	/* one context per queued request; freed on cancel/wait4done */
	ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
			item->sequence_id, item->output.sbuf);
	if (!ctx) {
		SDEROT_ERR("Failed allocating rotator context!!\n");
		return -EINVAL;
	}

	/* save entry for debugging purposes */
	ctx->last_entry = entry;

	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		if (entry->dst_buf.sbuf) {
			u32 op_mode;

			/* inline (stream buffer) mode: select HW trigger */
			if (entry->item.trigger ==
					SDE_ROTATOR_TRIGGER_COMMAND)
				ctx->start_ctrl = (rot->cmd_trigger << 4);
			else if (entry->item.trigger ==
					SDE_ROTATOR_TRIGGER_VIDEO)
				ctx->start_ctrl = (rot->vid_trigger << 4);
			else
				ctx->start_ctrl = 0;

			/* enable system cache with the requested slice id */
			ctx->sys_cache_mode = BIT(15) |
					((item->output.scid & 0x1f) << 8) |
					(item->output.writeback ? 0x5 : 0);

			ctx->op_mode = BIT(4) |
				((ctx->rot->sbuf_headroom & 0xff) << 8);

			/* detect transition to inline mode */
			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
					ROTTOP_OP_MODE) >> 4) & 0x3;
			if (!op_mode) {
				u32 status;

				/* still busy in offline mode is an error */
				status = SDE_ROTREG_READ(rot->mdss_base,
						ROTTOP_STATUS);
				if (status & BIT(0)) {
					SDEROT_ERR("rotator busy 0x%x\n",
							status);
					_sde_hw_rotator_dump_status(rot, NULL);
					SDEROT_EVTLOG_TOUT_HANDLER("rot",
							"vbif_dbg_bus",
							"panic");
				}
			}

		} else {
			/* offline mode defaults */
			ctx->start_ctrl = BIT(0);
			ctx->sys_cache_mode = 0;
			ctx->op_mode = 0;
		}
	} else {
		ctx->start_ctrl = BIT(0);
	}

	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);

	/*
	 * if Rotator HW is reset, but missing PM event notification, we
	 * need to init the SW timestamp automatically.
	 */
	rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
	if (!rot->reset_hw_ts && rststs) {
		u32 l_ts, h_ts, l_hwts, h_hwts;

		h_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_HIGH_PRIORITY);
		l_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_LOW_PRIORITY);
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
		SDEROT_EVTLOG(0xbad0, rststs, l_hwts, h_hwts, l_ts, h_ts);

		/*
		 * Re-seed the HW timestamp one behind the current queue's
		 * SW timestamp so the in-flight job completes the pair.
		 */
		if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY) {
			h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
			l_ts &= SDE_REGDMA_SWTS_MASK;
		} else {
			l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
			h_ts &= SDE_REGDMA_SWTS_MASK;
		}

		SDEROT_DBG("h_ts:0x%x, l_ts;0x%x\n", h_ts, l_ts);
		SDEROT_EVTLOG(0x900d, h_ts, l_ts);
		rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] = l_ts;
		rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] = h_ts;

		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY, h_ts);
		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY, l_ts);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);

		/* ensure write is issued to the rotator HW */
		wmb();
	}

	/* PM event path requested a timestamp resync (see post_pmevent) */
	if (rot->reset_hw_ts) {
		SDEROT_EVTLOG(rot->last_hwts[ROT_QUEUE_LOW_PRIORITY],
				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY,
				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY,
				rot->last_hwts[ROT_QUEUE_LOW_PRIORITY]);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);

		/* ensure write is issued to the rotator HW */
		wmb();

		rot->reset_hw_ts = false;
	}

	/* translate rotation item flags into HW programming flags */
	flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
			SDE_ROT_FLAG_FLIP_LR : 0;
	flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
			SDE_ROT_FLAG_FLIP_UD : 0;
	flags |= (item->flags & SDE_ROTATION_90) ?
			SDE_ROT_FLAG_ROT_90 : 0;
	flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
			SDE_ROT_FLAG_DEINTERLACE : 0;
	flags |= (item->flags & SDE_ROTATION_SECURE) ?
			SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
	flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;

	/* fetch engine (SSPP) configuration from the input side */
	sspp_cfg.img_width = item->input.width;
	sspp_cfg.img_height = item->input.height;
	sspp_cfg.fps = entry->perf->config.frame_rate;
	sspp_cfg.bw = entry->perf->bw;
	sspp_cfg.fmt = sde_get_format_params(item->input.format);
	if (!sspp_cfg.fmt) {
		SDEROT_ERR("null format\n");
		ret = -EINVAL;
		goto error;
	}
	sspp_cfg.src_rect = &item->src_rect;
	sspp_cfg.data = &entry->src_buf;
	sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
			item->input.height, &sspp_cfg.src_plane,
			0, /* No bwc_mode */
			(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
					true : false);
	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
			&sspp_cfg, danger_lut, safe_lut,
			entry->dnsc_factor_w, entry->dnsc_factor_h, flags);

	/* writeback engine configuration from the output side */
	wb_cfg.img_width = item->output.width;
	wb_cfg.img_height = item->output.height;
	wb_cfg.fps = entry->perf->config.frame_rate;
	wb_cfg.bw = entry->perf->bw;
	wb_cfg.fmt = sde_get_format_params(item->output.format);
	if (!wb_cfg.fmt) {
		SDEROT_ERR("null format\n");
		ret = -EINVAL;
		goto error;
	}
	wb_cfg.dst_rect = &item->dst_rect;
	wb_cfg.data = &entry->dst_buf;
	sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
			item->output.height, &wb_cfg.dst_plane,
			0, /* No bwc_mode */
			(flags & SDE_ROT_FLAG_ROT_90) ? true : false);
	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
	wb_cfg.prefill_bw = item->prefill_bw;
	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);

	/* setup VA mapping for debugfs */
	if (rot->dbgmem) {
		sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
				&item->input,
				&entry->src_buf);

		sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
				&item->output,
				&entry->dst_buf);
	}

	SDEROT_EVTLOG(ctx->timestamp, flags,
			item->input.width, item->input.height,
			item->output.width, item->output.height,
			entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
			item->input.format, item->output.format,
			entry->perf->config.frame_rate);

	/* initialize static vbif setting */
	sde_mdp_init_vbif();

	/* read-side OT limit for the offline (non-sbuf) path */
	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
		ot_params.fmt = ctx->is_traffic_shaping ?
			SDE_PIX_FMT_ABGR_8888 :
			entry->perf->config.input.format;
		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
		sde_mdp_set_ot_limit(&ot_params);
	}

	/* write-side OT limit for the offline (non-sbuf) path */
	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
		ot_params.fmt = ctx->is_traffic_shaping ?
			SDE_PIX_FMT_ABGR_8888 :
			entry->perf->config.input.format;
		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
		sde_mdp_set_ot_limit(&ot_params);
	}

	if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
		u32 qos_lut = 0; /* low priority for nrt read client */

		trace_rot_perf_set_qos_luts(mdata->vbif_xin_id[XIN_SSPP],
				sspp_cfg.fmt->format, qos_lut,
				sde_mdp_is_linear_format(sspp_cfg.fmt));

		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
	}

	/* VBIF QoS and other settings */
	if (!ctx->sbuf_mode) {
		if (mdata->parent_pdev)
			sde_hw_rotator_vbif_rt_setting();
		else
			sde_hw_rotator_vbif_setting(rot);
	}

	return 0;

error:
	/* release the context allocated at the top of this function */
	sde_hw_rotator_free_rotctx(rot, ctx);
	return ret;
}
  2565. /*
  2566. * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
  2567. * @hw: Pointer to rotator resource
  2568. * @entry: Pointer to rotation entry
  2569. *
  2570. * This function cancels a previously configured rotation entry.
  2571. */
  2572. static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
  2573. struct sde_rot_entry *entry)
  2574. {
  2575. struct sde_hw_rotator *rot;
  2576. struct sde_hw_rotator_resource_info *resinfo;
  2577. struct sde_hw_rotator_context *ctx;
  2578. unsigned long flags;
  2579. if (!hw || !entry) {
  2580. SDEROT_ERR("null hw resource/entry\n");
  2581. return -EINVAL;
  2582. }
  2583. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2584. rot = resinfo->rot;
  2585. /* Lookup rotator context from session-id */
  2586. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2587. entry->item.sequence_id, hw->wb_id);
  2588. if (!ctx) {
  2589. SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
  2590. entry->item.session_id);
  2591. return -EINVAL;
  2592. }
  2593. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2594. rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
  2595. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2596. SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
  2597. if (rot->dbgmem) {
  2598. sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
  2599. sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
  2600. }
  2601. /* Current rotator context job is finished, time to free up */
  2602. sde_hw_rotator_free_rotctx(rot, ctx);
  2603. return 0;
  2604. }
  2605. /*
  2606. * sde_hw_rotator_kickoff - kickoff processing on the given entry
  2607. * @hw: Pointer to rotator resource
  2608. * @entry: Pointer to rotation entry
  2609. */
  2610. static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
  2611. struct sde_rot_entry *entry)
  2612. {
  2613. struct sde_hw_rotator *rot;
  2614. struct sde_hw_rotator_resource_info *resinfo;
  2615. struct sde_hw_rotator_context *ctx;
  2616. if (!hw || !entry) {
  2617. SDEROT_ERR("null hw resource/entry\n");
  2618. return -EINVAL;
  2619. }
  2620. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2621. rot = resinfo->rot;
  2622. /* Lookup rotator context from session-id */
  2623. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2624. entry->item.sequence_id, hw->wb_id);
  2625. if (!ctx) {
  2626. SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
  2627. entry->item.session_id);
  2628. return -EINVAL;
  2629. }
  2630. rot->ops.start_rotator(ctx, ctx->q_id);
  2631. return 0;
  2632. }
  2633. static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
  2634. struct sde_rot_entry *entry)
  2635. {
  2636. struct sde_hw_rotator *rot;
  2637. struct sde_hw_rotator_resource_info *resinfo;
  2638. struct sde_hw_rotator_context *ctx;
  2639. unsigned long flags;
  2640. if (!hw || !entry) {
  2641. SDEROT_ERR("null hw resource/entry\n");
  2642. return -EINVAL;
  2643. }
  2644. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2645. rot = resinfo->rot;
  2646. /* Lookup rotator context from session-id */
  2647. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2648. entry->item.sequence_id, hw->wb_id);
  2649. if (!ctx) {
  2650. SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
  2651. entry->item.session_id);
  2652. return -EINVAL;
  2653. }
  2654. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2655. rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
  2656. ctx->abort = true;
  2657. wake_up_all(&ctx->regdma_waitq);
  2658. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2659. SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);
  2660. return 0;
  2661. }
  2662. /*
  2663. * sde_hw_rotator_wait4done - wait for completion notification
  2664. * @hw: Pointer to rotator resource
  2665. * @entry: Pointer to rotation entry
  2666. *
  2667. * This function blocks until the given entry is complete, error
  2668. * is detected, or timeout.
  2669. */
  2670. static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
  2671. struct sde_rot_entry *entry)
  2672. {
  2673. struct sde_hw_rotator *rot;
  2674. struct sde_hw_rotator_resource_info *resinfo;
  2675. struct sde_hw_rotator_context *ctx;
  2676. int ret;
  2677. if (!hw || !entry) {
  2678. SDEROT_ERR("null hw resource/entry\n");
  2679. return -EINVAL;
  2680. }
  2681. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2682. rot = resinfo->rot;
  2683. /* Lookup rotator context from session-id */
  2684. ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
  2685. entry->item.sequence_id, hw->wb_id);
  2686. if (!ctx) {
  2687. SDEROT_ERR("Cannot locate rotator ctx from sesison id:%d\n",
  2688. entry->item.session_id);
  2689. return -EINVAL;
  2690. }
  2691. ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);
  2692. if (rot->dbgmem) {
  2693. sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
  2694. sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
  2695. }
  2696. /* Current rotator context job is finished, time to free up*/
  2697. sde_hw_rotator_free_rotctx(rot, ctx);
  2698. return ret;
  2699. }
  2700. /*
  2701. * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
  2702. * @rot: Pointer to hw rotator
  2703. *
  2704. * This function initializes feature and/or capability bitmask based on
  2705. * h/w version read from the device.
  2706. */
  2707. static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
  2708. {
  2709. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2710. u32 hw_version;
  2711. if (!mdata) {
  2712. SDEROT_ERR("null rotator data\n");
  2713. return -EINVAL;
  2714. }
  2715. hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
  2716. SDEROT_DBG("hw version %8.8x\n", hw_version);
  2717. clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
  2718. set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
  2719. set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
  2720. set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
  2721. clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
  2722. set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
  2723. /* features exposed via rotator top h/w version */
  2724. if (hw_version != SDE_ROT_TYPE_V1_0) {
  2725. SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
  2726. set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
  2727. }
  2728. set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);
  2729. mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
  2730. mdata->nrt_vbif_dbg_bus_size =
  2731. ARRAY_SIZE(nrt_vbif_dbg_bus_r3);
  2732. mdata->rot_dbg_bus = rot_dbgbus_r3;
  2733. mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);
  2734. mdata->regdump = sde_rot_r3_regdump;
  2735. mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
  2736. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);
  2737. /* features exposed via mdss h/w version */
  2738. if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_600)) {
  2739. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2740. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2741. set_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map);
  2742. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2743. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2744. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2745. sde_hw_rotator_v4_inpixfmts;
  2746. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2747. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2748. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2749. sde_hw_rotator_v4_outpixfmts;
  2750. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2751. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2752. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2753. sde_hw_rotator_v4_inpixfmts_sbuf;
  2754. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2755. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2756. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2757. sde_hw_rotator_v4_outpixfmts_sbuf;
  2758. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2759. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2760. rot->downscale_caps =
  2761. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2762. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2763. SDE_MDP_HW_REV_500)) {
  2764. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2765. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2766. set_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map);
  2767. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2768. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2769. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2770. sde_hw_rotator_v4_inpixfmts;
  2771. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2772. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2773. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2774. sde_hw_rotator_v4_outpixfmts;
  2775. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2776. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2777. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2778. sde_hw_rotator_v4_inpixfmts_sbuf;
  2779. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2780. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2781. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2782. sde_hw_rotator_v4_outpixfmts_sbuf;
  2783. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2784. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2785. rot->downscale_caps =
  2786. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2787. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2788. SDE_MDP_HW_REV_530) ||
  2789. IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2790. SDE_MDP_HW_REV_520)) {
  2791. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2792. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2793. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  2794. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2795. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2796. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2797. sde_hw_rotator_v4_inpixfmts;
  2798. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2799. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2800. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2801. sde_hw_rotator_v4_outpixfmts;
  2802. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2803. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2804. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2805. sde_hw_rotator_v4_inpixfmts_sbuf;
  2806. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2807. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2808. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2809. sde_hw_rotator_v4_outpixfmts_sbuf;
  2810. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2811. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2812. rot->downscale_caps =
  2813. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2814. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2815. SDE_MDP_HW_REV_540)) {
  2816. SDEROT_DBG("Sys cache inline rotation not supported\n");
  2817. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  2818. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2819. set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
  2820. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2821. sde_hw_rotator_v4_inpixfmts;
  2822. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2823. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2824. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2825. sde_hw_rotator_v4_outpixfmts;
  2826. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2827. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2828. rot->downscale_caps =
  2829. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2830. } else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2831. SDE_MDP_HW_REV_400) ||
  2832. IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
  2833. SDE_MDP_HW_REV_410)) {
  2834. SDEROT_DBG("Supporting sys cache inline rotation\n");
  2835. set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
  2836. set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
  2837. set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
  2838. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2839. sde_hw_rotator_v4_inpixfmts;
  2840. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2841. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
  2842. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2843. sde_hw_rotator_v4_outpixfmts;
  2844. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2845. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
  2846. rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2847. sde_hw_rotator_v4_inpixfmts_sbuf;
  2848. rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2849. ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
  2850. rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
  2851. sde_hw_rotator_v4_outpixfmts_sbuf;
  2852. rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
  2853. ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
  2854. rot->downscale_caps =
  2855. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2856. } else {
  2857. rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2858. sde_hw_rotator_v3_inpixfmts;
  2859. rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2860. ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
  2861. rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
  2862. sde_hw_rotator_v3_outpixfmts;
  2863. rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
  2864. ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
  2865. rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
  2866. "LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
  2867. "LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
  2868. }
  2869. return 0;
  2870. }
/*
 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services rotator interrupt and wakes up waiting client
 * with pending rotation requests already submitted to h/w.
 *
 * Return: IRQ_HANDLED if a rotator-done event was serviced, IRQ_NONE
 * otherwise.
 */
static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;

	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	if (isr & ROT_DONE_MASK) {
		/* mask further rotator irqs until the next kickoff re-arms */
		if (rot->irq_num >= 0)
			sde_hw_rotator_disable_irq(rot);
		SDEROT_DBG("Notify rotator complete\n");

		/* Normal rotator only 1 session, no need to lookup */
		ctx = rot->rotCtx[0][0];
		WARN_ON(ctx == NULL);
		complete_all(&ctx->rot_comp);

		/* clear the done status under rotisr_lock to serialize
		 * against other accessors of the interrupt registers
		 */
		spin_lock(&rot->rotisr_lock);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
/*
 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services rotator interrupt, decoding the source of
 * events (high/low priority queue), and wakes up all waiting clients
 * with pending rotation requests already submitted to h/w.
 *
 * Return: IRQ_HANDLED when a queue-completion or regdma-error event was
 * serviced, IRQ_NONE otherwise.
 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx, *tmp;
	irqreturn_t ret = IRQ_NONE;
	u32 isr, isr_tmp;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	/* acknowledge interrupt before reading latest timestamp */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	/* Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up waiting thread
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
		} else {
			/* unreachable given the enclosing mask test; kept as
			 * a defensive catch-all
			 */
			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
			goto done_isr_handle;
		}

		ts = __sde_hw_rotator_get_timestamp(rot, q_id);

		/*
		 * Timestamp packet is not available in sbuf mode.
		 * Simulate timestamp update in the handler instead.
		 */
		if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
				list_empty(&rot->sbuf_ctx[q_id]))
			goto skip_sbuf;

		/*
		 * Walk pending sbuf contexts; regdma INT_0/INT_1 bits map to
		 * even/odd context timestamps, so consume each matching bit
		 * and publish that context's timestamp as the latest SW ts.
		 */
		ctx = NULL;
		isr_tmp = isr;
		list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
			u32 mask;

			mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
				REGDMA_INT_0_MASK;
			if (isr_tmp & mask) {
				isr_tmp &= ~mask;
				ctx = tmp;
				ts = ctx->timestamp;
				rot->ops.update_ts(rot, ctx->q_id, ts);
				SDEROT_DBG("update swts:0x%X\n", ts);
			}
			SDEROT_EVTLOG(isr, tmp->timestamp);
		}
		if (ctx == NULL)
			SDEROT_ERR("invalid swts ctx\n");
skip_sbuf:
		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];

		/*
		 * Wake up all waiting context from the current and previous
		 * SW Timestamp.
		 */
		while (ctx &&
			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
			wake_up_all(&ctx->regdma_waitq);

			/* step back one timestamp (wraps within swts range) */
			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		}

done_isr_handle:
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For REGDMA Err, we save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				/* only wake contexts not already notified */
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ts = __sde_hw_rotator_get_timestamp(
							rot, i);
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					wake_up_all(&ctx->regdma_waitq);
					SDEROT_DBG(
						"Wakeup rotctx[%d][%d]:%pK\n",
						i, j, ctx);
				}
			}
		}

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}
/*
 * sde_hw_rotator_validate_entry - validate rotation entry
 * @mgr: Pointer to rotator manager
 * @entry: Pointer to rotation entry
 *
 * This function validates the given rotation entry and provides possible
 * fixup (future improvement) if available. This function returns 0 if
 * the entry is valid, and returns error code otherwise.
 *
 * On success, entry->dnsc_factor_w/h hold the integer downscale factors
 * (0 when no scaling, 1 for the special 1.5x case); on failure both are
 * reset to 0.
 */
static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *hw_data;
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h;
	struct sde_rotation_item *item = &entry->item;
	struct sde_mdp_format_params *fmt;

	if (!mgr || !entry || !mgr->hw_data) {
		SDEROT_ERR("invalid parameters\n");
		return -EINVAL;
	}

	hw_data = mgr->hw_data;
	if (hw_data->maxlinewidth < item->src_rect.w) {
		SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
		return -EINVAL;
	}

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	/* 90-degree rotation swaps the destination width/height */
	if (item->flags & SDE_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if (item->output.sbuf &&
			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		SDEROT_ERR("stream buffer not supported\n");
		return -EINVAL;
	}

	if ((src_w != dst_w) || (src_h != dst_h)) {
		if (!dst_w || !dst_h) {
			SDEROT_DBG("zero output width/height not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		/* non-integral ratio may still be the 1.5x case on V2 h/w */
		if ((src_w % dst_w) || (src_h % dst_h)) {
			SDEROT_DBG("non integral scale not support\n");
			ret = -EINVAL;
			goto dnsc_1p5_check;
		}
		/* downscale factors must be power-of-2, at most 64 */
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
				(entry->dnsc_factor_w > 64)) {
			SDEROT_DBG("non power-of-2 w_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
				(entry->dnsc_factor_h > 64)) {
			SDEROT_DBG("non power-of-2 h_scale not support\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	/*
	 * NOTE(review): fmt is not NULL-checked before the format-class
	 * helpers below; confirm sde_get_format_params() cannot fail for
	 * an already-validated output format.
	 */
	fmt = sde_get_format_params(item->output.format);
	/*
	 * Rotator downscale support max 4 times for UBWC format and
	 * max 2 times for TP10/TP10_UBWC format
	 */
	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
		SDEROT_DBG("max downscale for UBWC format is 4\n");
		ret = -EINVAL;
		goto dnsc_err;
	}
	if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
		SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
		ret = -EINVAL;
	}
	goto dnsc_err;

dnsc_1p5_check:
	/* Check for 1.5 downscale that only applies to V2 HW */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
		/* 1.5x means dst*3 == src*2; integer factor is then 1 */
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w != 1) ||
				((dst_w * 3) != (src_w * 2))) {
			SDEROT_DBG(
				"No supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
				src_w, dst_w);
			ret = -EINVAL;
			goto dnsc_err;
		}
		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h != 1) ||
				((dst_h * 3) != (src_h * 2))) {
			SDEROT_DBG(
				"Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
				src_h, dst_h);
			ret = -EINVAL;
			goto dnsc_err;
		}
		ret = 0;
	}

dnsc_err:
	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
		SDEROT_DBG("asymmetric downscale not support\n");
		ret = -EINVAL;
	}

	if (ret) {
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
  3136. /*
  3137. * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
  3138. * @mgr: Pointer to rotator manager
  3139. * @attr: Pointer to device attribute interface
  3140. * @buf: Pointer to output buffer
  3141. * @len: Length of output buffer
  3142. */
  3143. static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
  3144. struct device_attribute *attr, char *buf, ssize_t len)
  3145. {
  3146. struct sde_hw_rotator *hw_data;
  3147. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  3148. int cnt = 0;
  3149. if (!mgr || !buf)
  3150. return 0;
  3151. hw_data = mgr->hw_data;
  3152. #define SPRINT(fmt, ...) \
  3153. (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
  3154. /* insert capabilities here */
  3155. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
  3156. SPRINT("min_downscale=1.5\n");
  3157. else
  3158. SPRINT("min_downscale=2.0\n");
  3159. SPRINT("downscale_compression=1\n");
  3160. if (hw_data->downscale_caps)
  3161. SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);
  3162. SPRINT("max_line_width=%d\n", sde_rotator_get_maxlinewidth(mgr));
  3163. #undef SPRINT
  3164. return cnt;
  3165. }
  3166. /*
  3167. * sde_hw_rotator_show_state - output state info to sysfs 'state' file
  3168. * @mgr: Pointer to rotator manager
  3169. * @attr: Pointer to device attribute interface
  3170. * @buf: Pointer to output buffer
  3171. * @len: Length of output buffer
  3172. */
  3173. static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
  3174. struct device_attribute *attr, char *buf, ssize_t len)
  3175. {
  3176. struct sde_hw_rotator *rot;
  3177. struct sde_hw_rotator_context *ctx;
  3178. int cnt = 0;
  3179. int num_active = 0;
  3180. int i, j;
  3181. if (!mgr || !buf) {
  3182. SDEROT_ERR("null parameters\n");
  3183. return 0;
  3184. }
  3185. rot = mgr->hw_data;
  3186. #define SPRINT(fmt, ...) \
  3187. (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
  3188. if (rot) {
  3189. SPRINT("rot_mode=%d\n", rot->mode);
  3190. SPRINT("irq_num=%d\n", rot->irq_num);
  3191. if (rot->mode == ROT_REGDMA_OFF) {
  3192. SPRINT("max_active=1\n");
  3193. SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
  3194. } else {
  3195. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  3196. for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
  3197. j++) {
  3198. ctx = rot->rotCtx[i][j];
  3199. if (ctx) {
  3200. SPRINT(
  3201. "rotCtx[%d][%d]:%pK\n",
  3202. i, j, ctx);
  3203. ++num_active;
  3204. }
  3205. }
  3206. }
  3207. SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
  3208. SPRINT("num_active=%d\n", num_active);
  3209. }
  3210. }
  3211. #undef SPRINT
  3212. return cnt;
  3213. }
  3214. /*
  3215. * sde_hw_rotator_get_pixfmt - get the indexed pixel format
  3216. * @mgr: Pointer to rotator manager
  3217. * @index: index of pixel format
  3218. * @input: true for input port; false for output port
  3219. * @mode: operating mode
  3220. */
  3221. static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
  3222. int index, bool input, u32 mode)
  3223. {
  3224. struct sde_hw_rotator *rot;
  3225. if (!mgr || !mgr->hw_data) {
  3226. SDEROT_ERR("null parameters\n");
  3227. return 0;
  3228. }
  3229. rot = mgr->hw_data;
  3230. if (mode >= SDE_ROTATOR_MODE_MAX) {
  3231. SDEROT_ERR("invalid rotator mode %d\n", mode);
  3232. return 0;
  3233. }
  3234. if (input) {
  3235. if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
  3236. return rot->inpixfmts[mode][index];
  3237. else
  3238. return 0;
  3239. } else {
  3240. if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
  3241. return rot->outpixfmts[mode][index];
  3242. else
  3243. return 0;
  3244. }
  3245. }
  3246. /*
  3247. * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
  3248. * @mgr: Pointer to rotator manager
  3249. * @pixfmt: pixel format to be verified
  3250. * @input: true for input port; false for output port
  3251. * @mode: operating mode
  3252. */
  3253. static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
  3254. bool input, u32 mode)
  3255. {
  3256. struct sde_hw_rotator *rot;
  3257. const u32 *pixfmts;
  3258. u32 num_pixfmt;
  3259. int i;
  3260. if (!mgr || !mgr->hw_data) {
  3261. SDEROT_ERR("null parameters\n");
  3262. return false;
  3263. }
  3264. rot = mgr->hw_data;
  3265. if (mode >= SDE_ROTATOR_MODE_MAX) {
  3266. SDEROT_ERR("invalid rotator mode %d\n", mode);
  3267. return false;
  3268. }
  3269. if (input) {
  3270. pixfmts = rot->inpixfmts[mode];
  3271. num_pixfmt = rot->num_inpixfmt[mode];
  3272. } else {
  3273. pixfmts = rot->outpixfmts[mode];
  3274. num_pixfmt = rot->num_outpixfmt[mode];
  3275. }
  3276. if (!pixfmts || !num_pixfmt) {
  3277. SDEROT_ERR("invalid pixel format tables\n");
  3278. return false;
  3279. }
  3280. for (i = 0; i < num_pixfmt; i++)
  3281. if (pixfmts[i] == pixfmt)
  3282. return true;
  3283. return false;
  3284. }
  3285. /*
  3286. * sde_hw_rotator_get_downscale_caps - get scaling capability string
  3287. * @mgr: Pointer to rotator manager
  3288. * @caps: Pointer to capability string buffer; NULL to return maximum length
  3289. * @len: length of capability string buffer
  3290. * return: length of capability string
  3291. */
  3292. static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
  3293. char *caps, int len)
  3294. {
  3295. struct sde_hw_rotator *rot;
  3296. int rc = 0;
  3297. if (!mgr || !mgr->hw_data) {
  3298. SDEROT_ERR("null parameters\n");
  3299. return -EINVAL;
  3300. }
  3301. rot = mgr->hw_data;
  3302. if (rot->downscale_caps) {
  3303. if (caps)
  3304. rc = snprintf(caps, len, "%s", rot->downscale_caps);
  3305. else
  3306. rc = strlen(rot->downscale_caps);
  3307. }
  3308. return rc;
  3309. }
  3310. /*
  3311. * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
  3312. * @mgr: Pointer to rotator manager
  3313. * return: maximum line width supported by hardware
  3314. */
  3315. static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
  3316. {
  3317. struct sde_hw_rotator *rot;
  3318. if (!mgr || !mgr->hw_data) {
  3319. SDEROT_ERR("null parameters\n");
  3320. return -EINVAL;
  3321. }
  3322. rot = mgr->hw_data;
  3323. return rot->maxlinewidth;
  3324. }
  3325. /*
  3326. * sde_hw_rotator_dump_status - dump status to debug output
  3327. * @mgr: Pointer to rotator manager
  3328. * return: none
  3329. */
  3330. static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
  3331. {
  3332. if (!mgr || !mgr->hw_data) {
  3333. SDEROT_ERR("null parameters\n");
  3334. return;
  3335. }
  3336. _sde_hw_rotator_dump_status(mgr->hw_data, NULL);
  3337. }
  3338. /*
  3339. * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  3340. * @hw_data: Pointer to rotator hw
  3341. * @dev: Pointer to platform device
  3342. */
  3343. static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
  3344. struct platform_device *dev)
  3345. {
  3346. int ret = 0;
  3347. u32 data;
  3348. if (!hw_data || !dev)
  3349. return -EINVAL;
  3350. ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
  3351. &data);
  3352. if (ret) {
  3353. SDEROT_DBG("default to regdma off\n");
  3354. ret = 0;
  3355. hw_data->mode = ROT_REGDMA_OFF;
  3356. } else if (data < ROT_REGDMA_MAX) {
  3357. SDEROT_DBG("set to regdma mode %d\n", data);
  3358. hw_data->mode = data;
  3359. } else {
  3360. SDEROT_ERR("regdma mode out of range. default to regdma off\n");
  3361. hw_data->mode = ROT_REGDMA_OFF;
  3362. }
  3363. ret = of_property_read_u32(dev->dev.of_node,
  3364. "qcom,mdss-highest-bank-bit", &data);
  3365. if (ret) {
  3366. SDEROT_DBG("default to A5X bank\n");
  3367. ret = 0;
  3368. hw_data->highest_bank = 2;
  3369. } else {
  3370. SDEROT_DBG("set highest bank bit to %d\n", data);
  3371. hw_data->highest_bank = data;
  3372. }
  3373. ret = of_property_read_u32(dev->dev.of_node,
  3374. "qcom,sde-ubwc-malsize", &data);
  3375. if (ret) {
  3376. ret = 0;
  3377. hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
  3378. } else {
  3379. SDEROT_DBG("set ubwc malsize to %d\n", data);
  3380. hw_data->ubwc_malsize = data;
  3381. }
  3382. ret = of_property_read_u32(dev->dev.of_node,
  3383. "qcom,sde-ubwc_swizzle", &data);
  3384. if (ret) {
  3385. ret = 0;
  3386. hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
  3387. } else {
  3388. SDEROT_DBG("set ubwc swizzle to %d\n", data);
  3389. hw_data->ubwc_swizzle = data;
  3390. }
  3391. ret = of_property_read_u32(dev->dev.of_node,
  3392. "qcom,mdss-sbuf-headroom", &data);
  3393. if (ret) {
  3394. ret = 0;
  3395. hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
  3396. } else {
  3397. SDEROT_DBG("set sbuf headroom to %d\n", data);
  3398. hw_data->sbuf_headroom = data;
  3399. }
  3400. ret = of_property_read_u32(dev->dev.of_node,
  3401. "qcom,mdss-rot-linewidth", &data);
  3402. if (ret) {
  3403. ret = 0;
  3404. hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
  3405. } else {
  3406. SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
  3407. hw_data->maxlinewidth = data;
  3408. }
  3409. return ret;
  3410. }
  3411. /*
  3412. * sde_rotator_r3_init - initialize the r3 module
  3413. * @mgr: Pointer to rotator manager
  3414. *
  3415. * This function setup r3 callback functions, parses r3 specific
  3416. * device tree settings, installs r3 specific interrupt handler,
  3417. * as well as initializes r3 internal data structure.
  3418. */
  3419. int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
  3420. {
  3421. struct sde_hw_rotator *rot;
  3422. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  3423. int i;
  3424. int ret;
  3425. rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
  3426. if (!rot)
  3427. return -ENOMEM;
  3428. mgr->hw_data = rot;
  3429. mgr->queue_count = ROT_QUEUE_MAX;
  3430. rot->mdss_base = mdata->sde_io.base;
  3431. rot->pdev = mgr->pdev;
  3432. rot->koff_timeout = KOFF_TIMEOUT;
  3433. rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
  3434. rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
  3435. /* Assign ops */
  3436. mgr->ops_hw_destroy = sde_hw_rotator_destroy;
  3437. mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
  3438. mgr->ops_hw_free = sde_hw_rotator_free_ext;
  3439. mgr->ops_config_hw = sde_hw_rotator_config;
  3440. mgr->ops_cancel_hw = sde_hw_rotator_cancel;
  3441. mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
  3442. mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
  3443. mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
  3444. mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
  3445. mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
  3446. mgr->ops_hw_show_state = sde_hw_rotator_show_state;
  3447. mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
  3448. mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
  3449. mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
  3450. mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
  3451. mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
  3452. mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
  3453. mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
  3454. mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;
  3455. ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
  3456. if (ret)
  3457. goto error_parse_dt;
  3458. rot->irq_num = platform_get_irq(mgr->pdev, 0);
  3459. if (rot->irq_num == -EPROBE_DEFER) {
  3460. SDEROT_INFO("irq master master not ready, defer probe\n");
  3461. return -EPROBE_DEFER;
  3462. } else if (rot->irq_num < 0) {
  3463. SDEROT_ERR("fail to get rotator irq, fallback to polling\n");
  3464. } else {
  3465. if (rot->mode == ROT_REGDMA_OFF)
  3466. ret = devm_request_threaded_irq(&mgr->pdev->dev,
  3467. rot->irq_num,
  3468. sde_hw_rotator_rotirq_handler,
  3469. NULL, 0, "sde_rotator_r3", rot);
  3470. else
  3471. ret = devm_request_threaded_irq(&mgr->pdev->dev,
  3472. rot->irq_num,
  3473. sde_hw_rotator_regdmairq_handler,
  3474. NULL, 0, "sde_rotator_r3", rot);
  3475. if (ret) {
  3476. SDEROT_ERR("fail to request irq r:%d\n", ret);
  3477. rot->irq_num = -1;
  3478. } else {
  3479. disable_irq(rot->irq_num);
  3480. }
  3481. }
  3482. atomic_set(&rot->irq_enabled, 0);
  3483. ret = sde_rotator_hw_rev_init(rot);
  3484. if (ret)
  3485. goto error_hw_rev_init;
  3486. setup_rotator_ops(&rot->ops, rot->mode,
  3487. test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map));
  3488. spin_lock_init(&rot->rotctx_lock);
  3489. spin_lock_init(&rot->rotisr_lock);
  3490. /* REGDMA initialization */
  3491. if (rot->mode == ROT_REGDMA_OFF) {
  3492. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3493. rot->cmd_wr_ptr[0][i] = (char __iomem *)(
  3494. &rot->cmd_queue[
  3495. SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
  3496. } else {
  3497. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3498. rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
  3499. rot->mdss_base +
  3500. REGDMA_RAM_REGDMA_CMD_RAM +
  3501. SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;
  3502. for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
  3503. rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
  3504. rot->mdss_base +
  3505. REGDMA_RAM_REGDMA_CMD_RAM +
  3506. SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
  3507. (i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
  3508. }
  3509. for (i = 0; i < ROT_QUEUE_MAX; i++) {
  3510. atomic_set(&rot->timestamp[i], 0);
  3511. INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
  3512. }
  3513. /* set rotator CBCR to shutoff memory/periphery on clock off.*/
  3514. clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
  3515. CLKFLAG_NORETAIN_MEM);
  3516. clk_set_flags(mgr->rot_clk[SDE_ROTATOR_CLK_MDSS_ROT].clk,
  3517. CLKFLAG_NORETAIN_PERIPH);
  3518. mdata->sde_rot_hw = rot;
  3519. return 0;
  3520. error_hw_rev_init:
  3521. if (rot->irq_num >= 0)
  3522. devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
  3523. devm_kfree(&mgr->pdev->dev, mgr->hw_data);
  3524. error_parse_dt:
  3525. return ret;
  3526. }