// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2022, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <asm/unistd.h>
#include <asm/ioctl.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/uaccess.h>
#include <uapi/linux/hgsl.h>
#include <linux/delay.h>
#include <trace/events/gpu_mem.h>
#include <linux/suspend.h>

#include "hgsl.h"
#include "hgsl_tcsr.h"
#include "hgsl_memory.h"
#include "hgsl_sysfs.h"
#include "hgsl_debugfs.h"

#define HGSL_DEVICE_NAME "hgsl"
#define HGSL_DEV_NUM 1

#define IORESOURCE_HWINF "hgsl_reg_hwinf"
#define IORESOURCE_GMUCX "hgsl_reg_gmucx"

/* Set up profiling packets as needed by scope */
#define CMDBATCH_PROFILING 0x00000010

/* Ping the user of HFI when this command is done */
#define CMDBATCH_NOTIFY 0x00000020

#define CMDBATCH_EOF 0x00000100
#define ECP_MAX_NUM_IB1 (2000)

/* ibDescs stored in indirect buffer */
#define CMDBATCH_INDIRECT 0x00000200

/* Max retry count of waiting for free space of doorbell queue. */
#define HGSL_QFREE_MAX_RETRY_COUNT (500)

#define GLB_DB_SRC_ISSUEIB_IRQ_ID_0 TCSR_SRC_IRQ_ID_0
#define GLB_DB_DEST_TS_RETIRE_IRQ_ID TCSR_DEST_IRQ_ID_0
#define GLB_DB_DEST_TS_RETIRE_IRQ_MASK TCSR_DEST_IRQ_MASK_0

#define HGSL_HYP_GENERAL_MAX_SIZE 4096

#define DB_STATE_Q_MASK 0xffff
#define DB_STATE_Q_UNINIT 1
#define DB_STATE_Q_INIT_DONE 2
#define DB_STATE_Q_FAULT 3

/* Doorbell signal types */
#define DB_SIGNAL_INVALID 0
#define DB_SIGNAL_GLOBAL_0 1
#define DB_SIGNAL_GLOBAL_1 2
#define DB_SIGNAL_LOCAL 3
#define DB_SIGNAL_MAX DB_SIGNAL_LOCAL
#define DB_SIGNAL_GLOBAL_2 3
#define DB_SIGNAL_GLOBAL_3 4
#define DBCQ_SIGNAL_MAX DB_SIGNAL_GLOBAL_3

#define HGSL_CLEANUP_WAIT_SLICE_IN_MS 50

#define QHDR_STATUS_INACTIVE 0x00
#define QHDR_STATUS_ACTIVE 0x01

#define HGSL_SEND_MSG_MAX_RETRY_COUNT (150)

// Skip all commands from the bad context
#define HGSL_FT_POLICY_FLAG_KILL BIT(2)

#define ALIGN_ADDRESS_4DWORD(addr) (((addr)+15) & ((long long) ~15))
#define ALIGN_DWORD_ADDRESS_4DWORD(dwaddr) (ALIGN_ADDRESS_4DWORD((dwaddr) * \
	sizeof(uint32_t)) / sizeof(uint32_t))
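
/*
 * For example, ALIGN_ADDRESS_4DWORD(17) == 32 (the next 16-byte boundary),
 * and ALIGN_DWORD_ADDRESS_4DWORD(5) == 8: dword address 5 is byte 20,
 * which aligns up to byte 32, i.e. dword 8.
 */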

enum HGSL_DBQ_METADATA_COMMAND_INFO {
	HGSL_DBQ_METADATA_CONTEXT_INFO,
	HGSL_DBQ_METADATA_QUEUE_INDEX,
	HGSL_DBQ_METADATA_COOPERATIVE_RESET,
};

#define HGSL_DBQ_CONTEXT_ANY (0x0)
#define HGSL_DBQ_OFFSET_ZERO (0x0)
#define HGSL_DBQ_WRITE_INDEX_OFFSET_IN_DWORD (0x0)
#define HGSL_DBQ_READ_INDEX_OFFSET_IN_DWORD (0x1)
#define HGSL_DBQ_IBDESC_SHORT_WAIT_MSEC (5)
#define HGSL_DBQ_IBDESC_LONG_WAIT_MSEC (30000)
#define HGSL_DBCQ_IBDESC_SHORT_WAIT_MSEC (5000)

enum HGSL_DBQ_METADATA_COOPERATIVE_RESET_INFO {
	HGSL_DBQ_HOST_TO_GVM_HARDRESET_REQ,
	HGSL_DBQ_GVM_TO_HOST_HARDRESET_DISPATCH_IN_BUSY,
};

enum HGSL_DBQ_METADATA_CONTEXT_OFFSET_INFO {
	HGSL_DBQ_CONTEXT_CONTEXT_ID_OFFSET_IN_DWORD,
	HGSL_DBQ_CONTEXT_TIMESTAMP_OFFSET_IN_DWORD,
	HGSL_DBQ_CONTEXT_DESTROY_OFFSET_IN_DWORD,
	HGSL_DBQ_METADATA_CTXT_TOTAL_ENTITY_NUM,
};

enum HGSL_DBQ_IBDESC_REQUEST_TYPE {
	HGSL_DBQ_IBDESC_REQUEST_ACQUIRE,
	HGSL_DBQ_IBDESC_REQUEST_RELEASE,
};

enum HGSL_DBQ_IBDESC_WAIT_TYPE {
	/* If caller can retry, use short wait */
	HGSL_DBQ_IBDESC_SHORT_WAIT,
	/* If caller not capable of retrying, use long wait */
	HGSL_DBQ_IBDESC_LONG_WAIT,
};

/* DBQ buffer layout (byte offsets):
 *
 *  0         1K         1.5K          2K          5.5K         6K
 *  | IBs     | reserved | w.idx/r.idx | ctxt.info | hard reset | batch ibs |
 */
#define HGSL_DBQ_HFI_Q_INDEX_BASE_OFFSET_IN_DWORD (1536 >> 2)
#define HGSL_DBQ_CONTEXT_INFO_BASE_OFFSET_IN_DWORD (2048 >> 2)
#define HGSL_DBQ_COOPERATIVE_RESET_INFO_BASE_OFFSET_IN_DWORD (5632 >> 2)
#define HGSL_DBQ_IBDESC_BASE_OFFSET_IN_DWORD (6144 >> 2)

#define HGSL_CTXT_QUEUE_BODY_DWSIZE (256)
#define HGSL_CTXT_QUEUE_BODY_SIZE (HGSL_CTXT_QUEUE_BODY_DWSIZE * sizeof(uint32_t))
#define HGSL_CTXT_QUEUE_BODY_OFFSET ALIGN_ADDRESS_4DWORD(sizeof(struct ctx_queue_header))
// Use indirect submission when the ib number is too big to be submitted inside hfi cmd.
#define HGSL_CTXT_QUEUE_INDIRECT_IB_DWSIZE (6000)
#define HGSL_CTXT_QUEUE_INDIRECT_IB_SIZE (HGSL_CTXT_QUEUE_INDIRECT_IB_DWSIZE * sizeof(uint32_t))
#define HGSL_CTXT_QUEUE_INDIRECT_IB_OFFSET ALIGN_ADDRESS_4DWORD(HGSL_CTXT_QUEUE_BODY_OFFSET +\
	HGSL_CTXT_QUEUE_BODY_SIZE)
#define HGSL_CTXT_QUEUE_TOTAL_SIZE PAGE_ALIGN(HGSL_CTXT_QUEUE_INDIRECT_IB_SIZE +\
	HGSL_CTXT_QUEUE_INDIRECT_IB_OFFSET)
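
/*
 * With the sizes above, one context queue buffer packs the 48-byte header,
 * the 1 KB (256-dword) body at a 16-byte-aligned offset, and the
 * 24000-byte indirect-IB region; assuming 4 KB pages,
 * HGSL_CTXT_QUEUE_TOTAL_SIZE == PAGE_ALIGN(1072 + 24000) == 28672 bytes.
 */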

struct ctx_queue_header {
	uint32_t version;		// Version of the context queue header
	uint32_t startAddr;		// GMU VA of start of queue
	uint32_t dwSize;		// Queue size in dwords
	uint32_t outFenceTs;		// Timestamp of the last output hardware fence sent to TxQueue
	uint32_t syncObjTs;		// Timestamp of last SYNC object that has been signaled
	uint32_t readIdx;		// Read index of the queue
	uint32_t writeIdx;		// Write index of the queue
	uint32_t hwFenceArrayAddr;	// GMU VA of the buffer to store output hardware fences
	uint32_t hwFenceArraySize;	// Size (bytes) of the buffer to store output hardware fences
	uint32_t dbqSignal;
	uint32_t unused0;
	uint32_t unused1;
};

static inline bool _timestamp_retired(struct hgsl_context *ctxt,
		unsigned int timestamp);
static inline void set_context_retired_ts(struct hgsl_context *ctxt,
		unsigned int ts);
static void _signal_contexts(struct qcom_hgsl *hgsl);
static int db_get_busy_state(void *dbq_base);
static void db_set_busy_state(void *dbq_base, int in_busy);
static int dbcq_get_free_indirect_ib_buffer(struct hgsl_priv *priv,
		struct hgsl_context *ctxt,
		uint32_t ts, uint32_t timeout_in_ms);
static struct hgsl_context *hgsl_get_context(struct qcom_hgsl *hgsl,
		uint32_t context_id);
static void hgsl_put_context(struct hgsl_context *ctxt);
static bool dbq_check_ibdesc_state(struct qcom_hgsl *hgsl, struct hgsl_context *ctxt,
		uint32_t request_type);
static int dbq_wait_free_ibdesc(struct qcom_hgsl *hgsl,
		struct hgsl_context *context, uint32_t request_type,
		uint32_t wait_type);
static int hgsl_wait_timestamp(struct qcom_hgsl *hgsl,
		struct hgsl_wait_ts_info *param);

static uint32_t hgsl_dbq_get_state_info(uint32_t *va_base, uint32_t command,
		uint32_t ctxt_id, uint32_t offset)
{
	uint32_t *dest = NULL;

	switch (command) {
	case HGSL_DBQ_METADATA_QUEUE_INDEX:
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_HFI_Q_INDEX_BASE_OFFSET_IN_DWORD +
			offset);
		break;
	case HGSL_DBQ_METADATA_CONTEXT_INFO:
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_CONTEXT_INFO_BASE_OFFSET_IN_DWORD +
			(HGSL_DBQ_METADATA_CTXT_TOTAL_ENTITY_NUM *
			 ctxt_id) + offset);
		break;
	case HGSL_DBQ_METADATA_COOPERATIVE_RESET:
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_COOPERATIVE_RESET_INFO_BASE_OFFSET_IN_DWORD +
			offset);
		break;
	default:
		break;
	}

	return (dest != NULL) ? *dest : 0;
}

static void hgsl_dbq_set_state_info(uint32_t *va_base, uint32_t command,
		uint32_t ctxt_id, uint32_t offset,
		uint32_t value)
{
	uint32_t *dest = NULL;

	switch (command) {
	case HGSL_DBQ_METADATA_QUEUE_INDEX:
		/* queue indices are global, not per-context; match the getter above */
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_HFI_Q_INDEX_BASE_OFFSET_IN_DWORD +
			offset);
		*dest = value;
		break;
	case HGSL_DBQ_METADATA_CONTEXT_INFO:
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_CONTEXT_INFO_BASE_OFFSET_IN_DWORD +
			(HGSL_DBQ_METADATA_CTXT_TOTAL_ENTITY_NUM *
			 ctxt_id) + offset);
		*dest = value;
		break;
	case HGSL_DBQ_METADATA_COOPERATIVE_RESET:
		dest = (uint32_t *)(va_base +
			HGSL_DBQ_COOPERATIVE_RESET_INFO_BASE_OFFSET_IN_DWORD +
			offset);
		*dest = value;
		break;
	default:
		break;
	}
}
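
/*
 * Example: per-context metadata starts at dword 512 (2048 >> 2) and each
 * context owns HGSL_DBQ_METADATA_CTXT_TOTAL_ENTITY_NUM (3) dwords, so the
 * retired-timestamp slot of context 3 is dword 512 + 3 * 3 + 1 == 522.
 */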

#define HFI_MSG_TYPE_CMD 0
#define HFI_MSG_TYPE_RET 1

/* HFI command define. */
#define HTOF_MSG_ISSUE_CMD 130
#define HFI_HEADER_CMD_SIZE_MAX (255)

#define MSG_ISSUE_INF_SZ() (sizeof(struct hgsl_db_cmds) >> 2)
#define MSG_ISSUE_IBS_SZ(numIB) \
	((numIB) * (sizeof(struct hgsl_fw_ib_desc) >> 2))

#define MSG_SEQ_NO_MASK 0xFFF00000
#define MSG_SEQ_NO_SHIFT 20
#define MSG_SEQ_NO_GET(x) (((x) & MSG_SEQ_NO_MASK) >> MSG_SEQ_NO_SHIFT)
#define MSG_TYPE_MASK 0x000F0000
#define MSG_TYPE_SHIFT 16
#define MSG_TYPE_GET(x) (((x) & MSG_TYPE_MASK) >> MSG_TYPE_SHIFT)
#define MSG_SZ_MASK 0x0000FF00
#define MSG_SZ_SHIFT 8
#define MSG_SZ_GET(x) (((x) & MSG_SZ_MASK) >> MSG_SZ_SHIFT)
#define MSG_ID_MASK 0x000000FF
#define MSG_ID_GET(x) ((x) & MSG_ID_MASK)

#define MAKE_HFI_MSG_HEADER(msgID, msgType, msgSize, msgSeqnum) \
	((msgID) | ((msgSize) << MSG_SZ_SHIFT) | \
	 ((msgType) << MSG_TYPE_SHIFT) | \
	 ((msgSeqnum) << MSG_SEQ_NO_SHIFT))

#define HFI_ISSUE_IB_HEADER(numIB, sz, msgSeqnum) \
	MAKE_HFI_MSG_HEADER( \
		HTOF_MSG_ISSUE_CMD, \
		HFI_MSG_TYPE_CMD, \
		sz, \
		msgSeqnum)
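
/*
 * Example: HFI_ISSUE_IB_HEADER(2, 14, 7) packs msg_id HTOF_MSG_ISSUE_CMD
 * (130, 0x82), a size of 14 dwords and sequence number 7 into one dword:
 * 0x82 | (14 << 8) | (HFI_MSG_TYPE_CMD << 16) | (7 << 20) == 0x00700E82.
 * Note the IB count is carried in the payload, not in the header itself.
 */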

/*
 * GMU HFI memory allocation options:
 * RGS_GMU_HFI_BUFFER_DTCM: Allocated from GMU CM3 DTCM.
 * RGS_GMU_HFI_BUFFER_NON_CACHEMEM: POR mode. Allocated from non-cached memory.
 */
enum db_buffer_mode_t {
	RGS_GMU_HFI_BUFFER_DTCM = 0,
	RGS_GMU_HFI_BUFFER_NON_CACHEMEM = 1,
	RGS_GMU_HFI_BUFFER_DEFAULT = 1
};

struct db_msg_request {
	int msg_has_response;
	int msg_has_ret_packet;
	int ignore_ret_packet;
	void *ptr_data;
	unsigned int msg_dwords;
} __packed;

struct db_msg_response {
	void *ptr_data;
	unsigned int size_dword;
} __packed;

/*
 * IB start address
 * IB size
 */
struct hgsl_fw_ib_desc {
	uint64_t addr;
	uint32_t sz;
} __packed;

struct hfi_msg_header_fields {
	uint32_t msg_id : 8;		///< 0~127 power, 128~255 eCP
	uint32_t msg_size_dword : 8;	///< unit in dword, maximum 255
	uint32_t msg_type : 4;		///< refer to adreno_hfi_msg_type_t
	uint32_t msg_packet_seq_no : 12;
};

union hfi_msg_header {
	uint32_t u32_all;
	struct hfi_msg_header_fields fields;
};

/*
 * Context ID
 * cmd_flags
 * Per-context user space gsl timestamp. It has to be
 * greater than the last retired timestamp.
 * Number of IB descriptors
 * An array of IB descriptors
 */
struct hgsl_db_cmds {
	union hfi_msg_header header;
	uint32_t ctx_id;
	uint32_t cmd_flags;
	uint32_t timestamp;
	uint64_t user_profile_gpuaddr;
	uint32_t num_ibs;
	uint32_t ib_desc_gmuaddr;
	struct hgsl_fw_ib_desc ib_descs[];
} __packed;

struct hgsl_db_msg_ret {
	uint32_t header;
	uint32_t ack;
	uint32_t err;
} __packed;

struct db_msg_id {
	uint32_t seq_no;
	uint32_t msg_id;
} __packed;

struct db_wait_retpacket {
	size_t event_signal;
	int in_use;
	struct db_msg_id db_msg_id;
	struct db_msg_response response;
} __packed;

struct db_ignore_retpacket {
	int in_use;
	struct db_msg_id db_msg_id;
} __packed;

struct hgsl_active_wait {
	struct list_head head;
	struct hgsl_context *ctxt;
	unsigned int timestamp;
};

#ifdef CONFIG_TRACE_GPU_MEM
static inline void hgsl_trace_gpu_mem_total(struct hgsl_priv *priv, int64_t delta)
{
	struct qcom_hgsl *hgsl = priv->dev;
	uint64_t size = atomic64_add_return(delta, &priv->total_mem_size);
	uint64_t global_size = atomic64_add_return(delta, &hgsl->total_mem_size);

	trace_gpu_mem_total(0, priv->pid, size);
	trace_gpu_mem_total(0, 0, global_size);
}
#else
static inline void hgsl_trace_gpu_mem_total(struct hgsl_priv *priv, int64_t delta)
{
}
#endif

static int hgsl_reg_map(struct platform_device *pdev,
		char *res_name, struct reg *reg);

static void hgsl_reg_read(struct reg *reg, unsigned int off,
		unsigned int *value)
{
	if (reg == NULL)
		return;

	if (WARN(off > reg->size,
		"Invalid reg read:0x%x, reg size:0x%x\n",
		off, reg->size))
		return;

	*value = __raw_readl(reg->vaddr + off);
	/* ensure this read finishes before the next one. */
	dma_rmb();
}

static void hgsl_reg_write(struct reg *reg, unsigned int off,
		unsigned int value)
{
	if (reg == NULL)
		return;

	if (WARN(off > reg->size,
		"Invalid reg write:0x%x, reg size:0x%x\n",
		off, reg->size))
		return;

	/*
	 * ensure previous writes post before this one,
	 * i.e. act like normal writel()
	 */
	dma_wmb();
	__raw_writel(value, (reg->vaddr + off));
}

static inline bool is_global_db(int tcsr_idx)
{
	return (tcsr_idx >= 0);
}

static void gmu_ring_local_db(struct qcom_hgsl *hgsl, unsigned int value)
{
	hgsl_reg_write(&hgsl->reg_dbidx, 0, value);
}

static void tcsr_ring_global_db(struct qcom_hgsl *hgsl, uint32_t tcsr_idx,
		uint32_t dbq_idx)
{
	if (tcsr_idx < HGSL_TCSR_NUM)
		hgsl_tcsr_irq_trigger(hgsl->tcsr[tcsr_idx][HGSL_TCSR_ROLE_SENDER],
			GLB_DB_SRC_ISSUEIB_IRQ_ID_0 + dbq_idx);
}

static uint32_t db_queue_freedwords(struct doorbell_queue *dbq)
{
	uint32_t queue_size;
	uint32_t queue_used;
	uint32_t wptr;
	uint32_t rptr;

	if (dbq == NULL)
		return 0;

	wptr = hgsl_dbq_get_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_QUEUE_INDEX, HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_WRITE_INDEX_OFFSET_IN_DWORD);
	rptr = hgsl_dbq_get_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_QUEUE_INDEX, HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_READ_INDEX_OFFSET_IN_DWORD);
	queue_size = dbq->data.dwords;
	queue_used = (wptr + queue_size - rptr) % queue_size;

	return (queue_size - queue_used - 1);
}
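
/*
 * Example: with a 1024-dword ring, wptr == 10 and rptr == 20, the used
 * space is (10 + 1024 - 20) % 1024 == 1014 dwords, leaving 9 free; one
 * dword is always kept unused so wptr never catches up to rptr.
 */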

static int db_queue_wait_freewords(struct doorbell_queue *dbq, uint32_t size)
{
	unsigned int retry_count = 0;
	unsigned int hard_reset_req = false;

	if (size == 0)
		return 0;

	if (dbq == NULL)
		return -EINVAL;

	do {
		hard_reset_req = hgsl_dbq_get_state_info((uint32_t *)dbq->vbase,
			HGSL_DBQ_METADATA_COOPERATIVE_RESET,
			HGSL_DBQ_CONTEXT_ANY,
			HGSL_DBQ_HOST_TO_GVM_HARDRESET_REQ);
		/* ensure read is done before comparison */
		dma_rmb();
		if (hard_reset_req == true) {
			if (db_get_busy_state(dbq->vbase) == true)
				db_set_busy_state(dbq->vbase, false);
		} else {
			if (db_queue_freedwords(dbq) >= size) {
				db_set_busy_state(dbq->vbase, true);
				return 0;
			}
		}
		if (msleep_interruptible(1))
			/* Let user handle this */
			return -EINTR;
	} while (retry_count++ < HGSL_QFREE_MAX_RETRY_COUNT);

	return -ETIMEDOUT;
}
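
/*
 * Cooperative-reset handshake: while the host keeps HARDRESET_REQ set,
 * the guest drops its DISPATCH_IN_BUSY flag and backs off instead of
 * claiming queue space; once the request clears, it re-asserts busy as
 * soon as enough free dwords are available. Polling runs in 1 ms slices
 * up to HGSL_QFREE_MAX_RETRY_COUNT before returning -ETIMEDOUT.
 */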

static uint32_t db_context_queue_freedwords(struct doorbell_context_queue *dbcq)
{
	struct ctx_queue_header *queue_header = (struct ctx_queue_header *)dbcq->queue_header;
	uint32_t queue_size = queue_header->dwSize;
	uint32_t wptr = queue_header->writeIdx;
	uint32_t rptr = queue_header->readIdx;
	uint32_t queue_used = (wptr + queue_size - rptr) % queue_size;

	return (queue_size - queue_used - 1);
}

static int dbcq_queue_wait_freewords(struct doorbell_context_queue *dbcq, uint32_t size)
{
	unsigned int retry_count = 0;

	do {
		if (db_context_queue_freedwords(dbcq) >= size)
			return 0;
		if (msleep_interruptible(1))
			/* Let user handle this */
			return -EINTR;
	} while (retry_count++ < HGSL_QFREE_MAX_RETRY_COUNT);

	return -ETIMEDOUT;
}

static int db_get_busy_state(void *dbq_base)
{
	unsigned int busy_state = false;

	busy_state = hgsl_dbq_get_state_info((uint32_t *)dbq_base,
		HGSL_DBQ_METADATA_COOPERATIVE_RESET,
		HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_GVM_TO_HOST_HARDRESET_DISPATCH_IN_BUSY);
	/* ensure read is done before comparison */
	dma_rmb();

	return busy_state;
}

static void db_set_busy_state(void *dbq_base, int in_busy)
{
	hgsl_dbq_set_state_info((uint32_t *)dbq_base,
		HGSL_DBQ_METADATA_COOPERATIVE_RESET,
		HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_GVM_TO_HOST_HARDRESET_DISPATCH_IN_BUSY,
		in_busy);
	/* confirm write to memory done */
	dma_wmb();
}

static int dbcq_send_msg(struct hgsl_priv *priv,
		struct db_msg_id *db_msg_id,
		struct db_msg_request *msg_req,
		struct db_msg_response *msg_resp,
		struct hgsl_context *ctxt)
{
	uint32_t msg_size_align;
	int ret;
	uint8_t *src, *dst;
	uint32_t move_dwords, resid_move_dwords;
	uint32_t queue_size_dword;
	struct qcom_hgsl *hgsl = priv->dev;
	struct doorbell_context_queue *dbcq = ctxt->dbcq;
	uint32_t wptr;
	struct ctx_queue_header *queue_header = (struct ctx_queue_header *)dbcq->queue_header;

	queue_size_dword = queue_header->dwSize;
	msg_size_align = ALIGN(msg_req->msg_dwords, 4);

	ret = dbcq_queue_wait_freewords(dbcq, msg_size_align);
	if (ret)
		goto quit;

	wptr = queue_header->writeIdx;
	move_dwords = msg_req->msg_dwords;
	if ((msg_req->msg_dwords + wptr) >= queue_size_dword) {
		move_dwords = queue_size_dword - wptr;
		resid_move_dwords = msg_req->msg_dwords - move_dwords;
		dst = (uint8_t *)dbcq->queue_body;
		src = (uint8_t *)msg_req->ptr_data + (move_dwords << 2);
		memcpy(dst, src, (resid_move_dwords << 2));
	}
	dst = (uint8_t *)dbcq->queue_body + (wptr << 2);
	src = msg_req->ptr_data;
	memcpy(dst, src, (move_dwords << 2));
	/* ensure data is committed before updating wptr */
	dma_wmb();
	wptr = (wptr + msg_size_align) % queue_size_dword;
	queue_header->writeIdx = wptr;
	/* confirm the write to memory is done before ringing the doorbell. */
	wmb();
	if (is_global_db(ctxt->tcsr_idx))
		/* trigger TCSR interrupt for global doorbell */
		tcsr_ring_global_db(hgsl, ctxt->tcsr_idx, dbcq->irq_idx);
	else
		/* trigger GMU interrupt */
		gmu_ring_local_db(hgsl, dbcq->irq_idx);

quit:
	/* let the user try again in case the submission was missed */
	if (ret == -ETIMEDOUT) {
		LOGE("Timed out sending db msg, try again\n");
		ret = -EAGAIN;
	}
	return ret;
}
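
/*
 * Worked example of the wrap-around copy above: with a 256-dword body,
 * wptr == 250 and a 10-dword message, dwords 0..5 land in slots 250..255,
 * dwords 6..9 wrap to slots 0..3, and wptr advances to
 * (250 + ALIGN(10, 4)) % 256 == 6.
 */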

static int db_send_msg(struct hgsl_priv *priv,
		struct db_msg_id *db_msg_id,
		struct db_msg_request *msg_req,
		struct db_msg_response *msg_resp,
		struct hgsl_context *ctxt)
{
	uint32_t msg_size_align;
	int ret;
	uint8_t *src, *dst;
	uint32_t move_dwords, resid_move_dwords;
	uint32_t queue_size_dword;
	struct qcom_hgsl *hgsl;
	struct doorbell_queue *dbq;
	uint32_t wptr;
	struct hgsl_db_cmds *cmds;
	int retry_count = 0;
	uint32_t hard_reset_req = false;

	hgsl = priv->dev;
	dbq = ctxt->dbq;
	mutex_lock(&dbq->lock);
	cmds = (struct hgsl_db_cmds *)msg_req->ptr_data;
	do {
		hard_reset_req = hgsl_dbq_get_state_info((uint32_t *)dbq->vbase,
			HGSL_DBQ_METADATA_COOPERATIVE_RESET,
			HGSL_DBQ_CONTEXT_ANY,
			HGSL_DBQ_HOST_TO_GVM_HARDRESET_REQ);
		/* ensure read is done before comparison */
		dma_rmb();
		if (hard_reset_req) {
			if (msleep_interruptible(1)) {
				/* Let user handle this */
				ret = -EINTR;
				goto quit;
			}
			if (retry_count++ > HGSL_SEND_MSG_MAX_RETRY_COUNT) {
				ret = -ETIMEDOUT;
				goto quit;
			}
		}
	} while (hard_reset_req);

	db_set_busy_state(dbq->vbase, true);
	queue_size_dword = dbq->data.dwords;
	msg_size_align = ALIGN(msg_req->msg_dwords, 4);

	ret = db_queue_wait_freewords(dbq, msg_size_align);
	if (ret < 0) {
		dev_err(hgsl->dev,
			"Timed out waiting for queue to free up\n");
		goto quit;
	}

	wptr = hgsl_dbq_get_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_QUEUE_INDEX, HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_WRITE_INDEX_OFFSET_IN_DWORD);
	move_dwords = msg_req->msg_dwords;
	if ((msg_req->msg_dwords + wptr) >= queue_size_dword) {
		move_dwords = queue_size_dword - wptr;
		resid_move_dwords = msg_req->msg_dwords - move_dwords;
		dst = (uint8_t *)dbq->data.vaddr;
		src = msg_req->ptr_data + (move_dwords << 2);
		memcpy(dst, src, (resid_move_dwords << 2));
	}
	dst = dbq->data.vaddr + (wptr << 2);
	src = msg_req->ptr_data;
	memcpy(dst, src, (move_dwords << 2));
	/* ensure data is committed before updating wptr */
	dma_wmb();
	wptr = (wptr + msg_size_align) % queue_size_dword;
	hgsl_dbq_set_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_QUEUE_INDEX,
		HGSL_DBQ_CONTEXT_ANY,
		HGSL_DBQ_WRITE_INDEX_OFFSET_IN_DWORD,
		wptr);
	hgsl_dbq_set_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_CONTEXT_INFO,
		cmds->ctx_id,
		HGSL_DBQ_CONTEXT_CONTEXT_ID_OFFSET_IN_DWORD,
		cmds->ctx_id);
	hgsl_dbq_set_state_info((uint32_t *)dbq->vbase,
		HGSL_DBQ_METADATA_CONTEXT_INFO,
		cmds->ctx_id,
		HGSL_DBQ_CONTEXT_TIMESTAMP_OFFSET_IN_DWORD,
		cmds->timestamp);
	/* confirm the write to memory is done before ringing the doorbell. */
	wmb();

	if (is_global_db(ctxt->tcsr_idx))
		/* trigger TCSR interrupt for global doorbell */
		tcsr_ring_global_db(hgsl, ctxt->tcsr_idx, dbq->dbq_idx);
	else
		/* trigger GMU interrupt */
		gmu_ring_local_db(hgsl, dbq->dbq_idx);

quit:
	db_set_busy_state(dbq->vbase, false);
	mutex_unlock(&dbq->lock);
	/* let the user try again in case the submission was missed */
	if (ret == -ETIMEDOUT) {
		LOGE("Timed out sending db msg, try again\n");
		ret = -EAGAIN;
	}
	return ret;
}

static int hgsl_db_next_timestamp(struct hgsl_context *ctxt,
		uint32_t *timestamp)
{
	if (timestamp == NULL) {
		LOGE("invalid timestamp");
		return -EINVAL;
	} else if ((ctxt->flags & GSL_CONTEXT_FLAG_USER_GENERATED_TS) == 0) {
		return 0;
	} else if (ctxt->flags & GSL_CONTEXT_FLAG_CLIENT_GENERATED_TS) {
		if (hgsl_ts32_ge(ctxt->queued_ts, *timestamp)) {
			LOGW("ctx:%d next client ts %d isn't greater than current ts %d",
				ctxt->context_id, *timestamp, ctxt->queued_ts);
			return -ERANGE;
		}
	} else {
		/*
		 * Callers use 0 and ~0 as special values; do not assign them
		 * as timestamps, instead roll over to 1.
		 */
		*timestamp = ctxt->queued_ts + 1;
		if (*timestamp == UINT_MAX)
			*timestamp = 1;
	}
	return 0;
}
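
/*
 * Example: with kernel-generated timestamps, queued_ts == UINT_MAX - 1
 * would yield *timestamp == UINT_MAX, which is reserved, so it rolls over
 * to 1; queued_ts == 41 simply yields 42.
 */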

static void ts_retire_worker(struct work_struct *work)
{
	struct qcom_hgsl *hgsl =
		container_of(work, struct qcom_hgsl, ts_retire_work);
	struct hgsl_active_wait *wait, *w;

	spin_lock(&hgsl->active_wait_lock);
	list_for_each_entry_safe(wait, w, &hgsl->active_wait_list, head) {
		if (_timestamp_retired(wait->ctxt, wait->timestamp))
			wake_up_all(&wait->ctxt->wait_q);
	}
	spin_unlock(&hgsl->active_wait_lock);

	_signal_contexts(hgsl);
}

static irqreturn_t hgsl_tcsr_isr(struct device *dev, uint32_t status)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct qcom_hgsl *hgsl = platform_get_drvdata(pdev);

	if ((status & GLB_DB_DEST_TS_RETIRE_IRQ_MASK) == 0)
		return IRQ_NONE;

	queue_work(hgsl->wq, &hgsl->ts_retire_work);

	return IRQ_HANDLED;
}

static int hgsl_init_global_db(struct qcom_hgsl *hgsl,
		enum hgsl_tcsr_role role, int idx)
{
	struct device *dev = hgsl->dev;
	struct device_node *np = dev->of_node;
	bool is_sender = (role == HGSL_TCSR_ROLE_SENDER);
	const char *node_name = is_sender ? "qcom,glb-db-senders" :
		"qcom,glb-db-receivers";
	struct device_node *tcsr_np;
	struct platform_device *tcsr_pdev;
	struct hgsl_tcsr *tcsr;
	int ret;

	if (hgsl->tcsr[idx][role] != NULL)
		return 0;

	tcsr_np = of_parse_phandle(np, node_name, idx);
	if (IS_ERR_OR_NULL(tcsr_np)) {
		dev_err(dev, "failed to find %s node\n", node_name);
		ret = -ENODEV;
		goto fail;
	}

	tcsr_pdev = of_find_device_by_node(tcsr_np);
	if (IS_ERR_OR_NULL(tcsr_pdev)) {
		dev_err(dev,
			"failed to find %s tcsr dev from node\n",
			is_sender ? "sender" : "receiver");
		ret = -ENODEV;
		goto fail;
	}

	if (!is_sender && !hgsl->wq) {
		hgsl->wq = alloc_workqueue("hgsl-wq", WQ_HIGHPRI, 0);
		if (!hgsl->wq) {
			dev_err(dev, "failed to create workqueue\n");
			ret = -ENOMEM;
			goto fail;
		}
		INIT_WORK(&hgsl->ts_retire_work, ts_retire_worker);
		INIT_LIST_HEAD(&hgsl->active_wait_list);
		spin_lock_init(&hgsl->active_wait_lock);
	}

	tcsr = hgsl_tcsr_request(tcsr_pdev, role, dev,
		is_sender ? NULL : hgsl_tcsr_isr);
	if (IS_ERR_OR_NULL(tcsr)) {
		dev_err(dev,
			"failed to request %s tcsr, ret %lx\n",
			is_sender ? "sender" : "receiver", PTR_ERR(tcsr));
		ret = tcsr ? PTR_ERR(tcsr) : -ENODEV;
		goto destroy_wq;
	}

	ret = hgsl_tcsr_enable(tcsr);
	if (ret) {
		dev_err(dev,
			"failed to enable %s tcsr, ret %d\n",
			is_sender ? "sender" : "receiver", ret);
		goto free_tcsr;
	}

	if (!is_sender)
		hgsl_tcsr_irq_enable(tcsr, GLB_DB_DEST_TS_RETIRE_IRQ_MASK,
			true);

	hgsl->tcsr[idx][role] = tcsr;
	return 0;

free_tcsr:
	hgsl_tcsr_free(tcsr);
destroy_wq:
	if (hgsl->wq) {
		destroy_workqueue(hgsl->wq);
		hgsl->wq = NULL;
	}
fail:
	return ret;
}

static int hgsl_init_local_db(struct qcom_hgsl *hgsl)
{
	struct platform_device *pdev = to_platform_device(hgsl->dev);

	if (hgsl->reg_dbidx.vaddr != NULL)
		return 0;
	else
		return hgsl_reg_map(pdev, IORESOURCE_GMUCX, &hgsl->reg_dbidx);
}

static int hgsl_init_db_signal(struct qcom_hgsl *hgsl, int tcsr_idx)
{
	int ret;

	mutex_lock(&hgsl->mutex);
	if (is_global_db(tcsr_idx)) {
		ret = hgsl_init_global_db(hgsl, HGSL_TCSR_ROLE_SENDER,
			tcsr_idx);
		ret |= hgsl_init_global_db(hgsl, HGSL_TCSR_ROLE_RECEIVER,
			tcsr_idx);
	} else {
		ret = hgsl_init_local_db(hgsl);
	}
	mutex_unlock(&hgsl->mutex);

	return ret;
}

static void hgsl_dbcq_init(struct hgsl_priv *priv,
		struct hgsl_context *ctxt, uint32_t db_signal,
		uint32_t gmuaddr, uint32_t irq_idx)
{
	struct qcom_hgsl *hgsl = priv->dev;
	struct doorbell_context_queue *dbcq = NULL;
	int tcsr_idx = 0;
	int ret = 0;

	if ((db_signal <= DB_SIGNAL_INVALID) ||
		(db_signal > DBCQ_SIGNAL_MAX) ||
		(gmuaddr == 0) ||
		(irq_idx == GLB_DB_DEST_TS_RETIRE_IRQ_ID)) {
		LOGE("Invalid db signal %d or queue buffer 0x%x or irq_idx %d\n",
			db_signal, gmuaddr, irq_idx);
		goto err;
	}

	dbcq = hgsl_zalloc(sizeof(struct doorbell_context_queue));
	if (!dbcq) {
		LOGE("Failed to allocate memory for doorbell context queue\n");
		goto err;
	}

	tcsr_idx = db_signal - DB_SIGNAL_GLOBAL_0;
	ret = hgsl_init_db_signal(hgsl, tcsr_idx);
	if (ret != 0) {
		LOGE("failed to init dbcq signal %d", db_signal);
		goto err;
	}

	dbcq->db_signal = db_signal;
	dbcq->irq_idx = irq_idx;
	dbcq->queue_header_gmuaddr = gmuaddr;
	dbcq->queue_body_gmuaddr = dbcq->queue_header_gmuaddr + HGSL_CTXT_QUEUE_BODY_OFFSET;
	dbcq->indirect_ibs_gmuaddr =
		dbcq->queue_header_gmuaddr + HGSL_CTXT_QUEUE_INDIRECT_IB_OFFSET;
	ctxt->tcsr_idx = tcsr_idx;
	ctxt->dbcq = dbcq;
	return;

err:
	hgsl_free(dbcq);
}

static void hgsl_dbcq_close(struct hgsl_context *ctxt)
{
	struct doorbell_context_queue *dbcq = ctxt->dbcq;

	if (!dbcq)
		return;

	if (dbcq->queue_mem != NULL) {
		if (dbcq->queue_mem->dma_buf != NULL) {
			if (dbcq->queue_header != NULL) {
				dma_buf_vunmap(dbcq->queue_mem->dma_buf, &dbcq->map);
				dbcq->queue_header = NULL;
			}
			dma_buf_end_cpu_access(dbcq->queue_mem->dma_buf,
				DMA_BIDIRECTIONAL);
		}
		hgsl_sharedmem_free(dbcq->queue_mem);
	}

	hgsl_free(dbcq);
	ctxt->dbcq = NULL;
}

static int hgsl_dbcq_open(struct hgsl_priv *priv,
		struct hgsl_context *ctxt)
{
	struct qcom_hgsl *hgsl = priv->dev;
	struct doorbell_context_queue *dbcq = ctxt->dbcq;
	struct hgsl_hab_channel_t *hab_channel = NULL;
	int ret = 0;
	struct ctx_queue_header *queue_header = NULL;

	if (!dbcq) {
		ret = -EPERM;
		goto out;
	}

	if (dbcq->queue_header != NULL)
		goto out;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret == -EINTR) {
		goto out;
	} else if (ret != 0) {
		LOGE("failed to open hab channel");
		goto err;
	}

	dbcq->queue_mem = hgsl_mem_node_zalloc(hgsl->default_iocoherency);
	if (!dbcq->queue_mem) {
		LOGE("out of memory");
		ret = -ENOMEM;
		goto err;
	}

	dbcq->queue_mem->flags = GSL_MEMFLAGS_UNCACHED | GSL_MEMFLAGS_ALIGN4K;
	ret = hgsl_sharedmem_alloc(hgsl->dev, HGSL_CTXT_QUEUE_TOTAL_SIZE,
		dbcq->queue_mem->flags, dbcq->queue_mem);
	if (ret != 0) {
		LOGE("Failed to allocate memory for doorbell context queue buffer\n");
		goto err;
	}

	dma_buf_begin_cpu_access(dbcq->queue_mem->dma_buf, DMA_BIDIRECTIONAL);
	ret = dma_buf_vmap(dbcq->queue_mem->dma_buf, &dbcq->map);
	if (ret) {
		LOGE("failed to map dbq buffer");
		goto err;
	}

	dbcq->queue_header = dbcq->map.vaddr;
	dbcq->queue_body = (void *)((uint8_t *)dbcq->queue_header + HGSL_CTXT_QUEUE_BODY_OFFSET);
	dbcq->indirect_ibs =
		(void *)((uint8_t *)dbcq->queue_header + HGSL_CTXT_QUEUE_INDIRECT_IB_OFFSET);
	dbcq->queue_size = HGSL_CTXT_QUEUE_BODY_DWSIZE;

	queue_header = (struct ctx_queue_header *)dbcq->queue_header;
	queue_header->version = 0;
	queue_header->startAddr = dbcq->queue_body_gmuaddr;
	queue_header->dwSize = HGSL_CTXT_QUEUE_BODY_DWSIZE;
	queue_header->readIdx = 0;
	queue_header->writeIdx = 0;
	queue_header->dbqSignal = dbcq->db_signal;

	ret = hgsl_hyp_context_register_dbcq(hab_channel, ctxt->devhandle, ctxt->context_id,
		dbcq->queue_mem->dma_buf, dbcq->queue_mem->memdesc.size,
		HGSL_CTXT_QUEUE_BODY_OFFSET, &ctxt->dbcq_export_id);
	if (ret) {
		LOGE("Failed to register dbcq %d\n", ret);
		goto err;
	}
	goto out;

err:
	hgsl_dbcq_close(ctxt);
	ret = -EPERM;
out:
	hgsl_hyp_channel_pool_put(hab_channel);
	LOGI("%d", ret);
	return ret;
}

static int hgsl_dbcq_issue_cmd(struct hgsl_priv *priv,
		struct hgsl_context *ctxt, uint32_t num_ibs,
		uint32_t gmu_cmd_flags,
		uint32_t *timestamp,
		struct hgsl_fw_ib_desc ib_descs[],
		uint64_t user_profile_gpuaddr)
{
	int ret;
	uint32_t msg_dwords;
	uint32_t msg_buf_sz;
	uint32_t msg_dwords_aligned;
	struct hgsl_db_cmds *cmds = NULL;
	struct db_msg_request req;
	struct db_msg_response resp;
	struct db_msg_id db_msg_id;
	struct doorbell_context_queue *dbcq = NULL;
	struct qcom_hgsl *hgsl = priv->dev;
	bool is_batch_ibdesc = false;

	mutex_lock(&ctxt->lock);
	ret = hgsl_dbcq_open(priv, ctxt);
	if (ret)
		goto out;

	dbcq = ctxt->dbcq;
	db_msg_id.msg_id = HTOF_MSG_ISSUE_CMD;
	db_msg_id.seq_no = dbcq->seq_num++;

	if ((num_ibs > ECP_MAX_NUM_IB1) ||
		(HGSL_CTXT_QUEUE_INDIRECT_IB_SIZE < (num_ibs * sizeof(struct hgsl_fw_ib_desc)))) {
		LOGE("Invalid num_ibs %d for context %d", num_ibs, ctxt->context_id);
		LOGE("max ib num %d, max indirect ib buffer size %d",
			ECP_MAX_NUM_IB1, HGSL_CTXT_QUEUE_INDIRECT_IB_SIZE);
		ret = -EINVAL;
		goto out;
	}

	msg_dwords = MSG_ISSUE_INF_SZ() + MSG_ISSUE_IBS_SZ(num_ibs);
	msg_dwords_aligned = ALIGN(msg_dwords, 4);

	// check if we need to do batch submission
	if ((msg_dwords_aligned >= dbcq->queue_size) ||
		(msg_dwords_aligned > (MSG_SZ_MASK >> MSG_SZ_SHIFT))) {
		msg_dwords = MSG_ISSUE_INF_SZ();
		msg_dwords_aligned = ALIGN(msg_dwords, 4);
		is_batch_ibdesc = true;
		LOGI("Number of IBs exceeded. Proceeding with CMDBATCH_IBDESC");
	}

	msg_buf_sz = msg_dwords_aligned << 2;
	ret = hgsl_db_next_timestamp(ctxt, timestamp);
	if (ret)
		goto out;

	cmds = hgsl_zalloc(msg_buf_sz);
	if (cmds == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	cmds->header = (union hfi_msg_header)HFI_ISSUE_IB_HEADER(num_ibs,
		msg_dwords,
		db_msg_id.seq_no);
	cmds->ctx_id = ctxt->context_id;
	cmds->num_ibs = num_ibs;
	cmds->cmd_flags = gmu_cmd_flags;
	cmds->timestamp = *timestamp;
	cmds->user_profile_gpuaddr = user_profile_gpuaddr;

	if (is_batch_ibdesc) {
		// wait for the indirect IB buffer
		ret = dbcq_get_free_indirect_ib_buffer(priv, ctxt, *timestamp,
			HGSL_DBCQ_IBDESC_SHORT_WAIT_MSEC);
		if (ret)
			goto out;
		cmds->ib_desc_gmuaddr = dbcq->indirect_ibs_gmuaddr;
		cmds->cmd_flags |= CMDBATCH_INDIRECT;
		memcpy(dbcq->indirect_ibs, ib_descs, sizeof(ib_descs[0]) * num_ibs);
	} else {
		memcpy(cmds->ib_descs, ib_descs, sizeof(ib_descs[0]) * num_ibs);
	}

	req.msg_has_response = 0;
	req.msg_has_ret_packet = 0;
	req.ignore_ret_packet = 1;
	req.msg_dwords = msg_dwords;
	req.ptr_data = cmds;

	if (!ctxt->is_killed) {
		ret = dbcq_send_msg(priv, &db_msg_id, &req, &resp, ctxt);
	} else {
		/* Retire the ts immediately */
		set_context_retired_ts(ctxt, *timestamp);
		/* Trigger event to the wait-for-ts thread */
		_signal_contexts(hgsl);
		ret = 0;
	}

	if (ret == 0) {
		ctxt->queued_ts = *timestamp;
		if (!is_batch_ibdesc) {
			/*
			 * Check if we can release the indirect ib buffer.
			 * If the indirect ib has retired, set dbcq->indirect_ib_ts to 0.
			 * We pass timeout 0 as we just want a quick check; if the
			 * ts didn't retire, check again on the next submission.
			 */
			dbcq_get_free_indirect_ib_buffer(priv, ctxt, 0, 0);
		}
	}

out:
	hgsl_free(cmds);
	mutex_unlock(&ctxt->lock);
	return ret;
}

static int hgsl_db_issue_cmd(struct hgsl_priv *priv,
		struct hgsl_context *ctxt, uint32_t num_ibs,
		uint32_t gmu_cmd_flags,
		uint32_t *timestamp,
		struct hgsl_fw_ib_desc ib_descs[],
		uint64_t user_profile_gpuaddr)
{
	int ret = 0;
	uint32_t msg_dwords;
	uint32_t msg_buf_sz;
	uint32_t msg_dwords_aligned;
	struct hgsl_db_cmds *cmds;
	struct db_msg_request req;
	struct db_msg_response resp;
	struct db_msg_id db_msg_id;
	struct doorbell_queue *dbq = ctxt->dbq;
	struct qcom_hgsl *hgsl = priv->dev;
	bool is_batch_ibdesc = false;
	uint8_t *dst;
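
	/*
	 * Prefer the per-context doorbell queue: hgsl_dbcq_issue_cmd()
	 * returns -EPERM only when the context has no usable DBCQ, in which
	 * case we fall back to the shared DBQ below.
	 */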
  1008. ret = hgsl_dbcq_issue_cmd(priv, ctxt, num_ibs, gmu_cmd_flags,
  1009. timestamp, ib_descs, user_profile_gpuaddr);
  1010. if (ret != -EPERM)
  1011. return ret;
  1012. if (dbq == NULL)
  1013. return -EPERM;
  1014. db_msg_id.msg_id = HTOF_MSG_ISSUE_CMD;
  1015. db_msg_id.seq_no = atomic_inc_return(&dbq->seq_num);
  1016. if ((num_ibs > (UINT_MAX / (sizeof(struct hgsl_fw_ib_desc) >> 2))) ||
  1017. (MSG_ISSUE_INF_SZ() > (UINT_MAX - MSG_ISSUE_IBS_SZ(num_ibs))))
  1018. return -EINVAL;
  1019. msg_dwords = MSG_ISSUE_INF_SZ() + MSG_ISSUE_IBS_SZ(num_ibs);
  1020. msg_dwords_aligned = ALIGN(msg_dwords, 4);
  1021. if (num_ibs > ECP_MAX_NUM_IB1) {
  1022. LOGE("number of ibs %d exceed max %d",
  1023. num_ibs, ECP_MAX_NUM_IB1);
  1024. return -EINVAL;
  1025. }
  1026. if ((msg_dwords_aligned >= dbq->data.dwords) ||
  1027. (msg_dwords_aligned > (MSG_SZ_MASK >> MSG_SZ_SHIFT))) {
  1028. if ((MSG_ISSUE_IBS_SZ(num_ibs) << 2) <= dbq->ibdesc_max_size) {
  1029. msg_dwords = MSG_ISSUE_INF_SZ();
  1030. msg_dwords_aligned = ALIGN(msg_dwords, 4);
  1031. is_batch_ibdesc = true;
  1032. LOGI("Number of IBs exceed. Proceeding with CMDBATCH_IBDESC");
  1033. } else {
  1034. dev_err(hgsl->dev, "number of IBs exceed\n");
  1035. return -EINVAL;
  1036. }
  1037. }
  1038. msg_buf_sz = msg_dwords_aligned << 2;
  1039. ret = hgsl_db_next_timestamp(ctxt, timestamp);
  1040. if (ret)
  1041. return ret;
  1042. cmds = hgsl_zalloc(msg_buf_sz);
  1043. if (cmds == NULL)
  1044. return -ENOMEM;
  1045. cmds->header = (union hfi_msg_header)HFI_ISSUE_IB_HEADER(num_ibs,
  1046. msg_dwords,
  1047. db_msg_id.seq_no);
  1048. cmds->ctx_id = ctxt->context_id;
  1049. cmds->num_ibs = num_ibs;
  1050. cmds->cmd_flags = gmu_cmd_flags;
  1051. cmds->timestamp = *timestamp;
  1052. cmds->user_profile_gpuaddr = user_profile_gpuaddr;
  1053. if (!is_batch_ibdesc) {
  1054. memcpy(cmds->ib_descs, ib_descs, sizeof(ib_descs[0]) * num_ibs);
  1055. } else {
  1056. mutex_lock(&dbq->lock);
  1057. /* wait for the buffer */
  1058. ret = dbq_wait_free_ibdesc(hgsl, ctxt,
  1059. HGSL_DBQ_IBDESC_REQUEST_ACQUIRE,
  1060. HGSL_DBQ_IBDESC_SHORT_WAIT);
  1061. if (ret) {
  1062. mutex_unlock(&dbq->lock);
  1063. goto err;
  1064. }
  1065. dbq->ibdesc_priv.buf_inuse = true;
  1066. dbq->ibdesc_priv.context_id = ctxt->context_id;
  1067. dbq->ibdesc_priv.timestamp = *timestamp;
  1068. cmds->cmd_flags = gmu_cmd_flags | CMDBATCH_INDIRECT;
  1069. cmds->ib_desc_gmuaddr = dbq->gmuaddr +
  1070. (HGSL_DBQ_IBDESC_BASE_OFFSET_IN_DWORD << 2);
  1071. dst = (uint8_t *)dbq->vbase +
  1072. (HGSL_DBQ_IBDESC_BASE_OFFSET_IN_DWORD << 2);
  1073. memcpy(dst, ib_descs, sizeof(ib_descs[0]) * num_ibs);
  1074. mutex_unlock(&dbq->lock);
  1075. }
  1076. req.msg_has_response = 0;
  1077. req.msg_has_ret_packet = 0;
  1078. req.ignore_ret_packet = 1;
  1079. req.msg_dwords = msg_dwords;
  1080. req.ptr_data = cmds;
  1081. if (!ctxt->is_killed) {
  1082. ret = db_send_msg(priv, &db_msg_id, &req, &resp, ctxt);
  1083. } else {
  1084. /* Retire ts immediately*/
  1085. set_context_retired_ts(ctxt, *timestamp);
  1086. /* Trigger event to waitfor ts thread */
  1087. _signal_contexts(hgsl);
  1088. ret = 0;
  1089. }
  1090. if (ret == 0)
  1091. ctxt->queued_ts = *timestamp;
  1092. err:
  1093. hgsl_free(cmds);
  1094. return ret;
  1095. }
#define USRPTR(a) u64_to_user_ptr((uint64_t)(a))
static void hgsl_reset_dbq(struct doorbell_queue *dbq)
{
	if (dbq->dma) {
		dma_buf_end_cpu_access(dbq->dma, DMA_BIDIRECTIONAL);
		if (dbq->vbase) {
			dma_buf_vunmap(dbq->dma, &dbq->map);
			dbq->vbase = NULL;
		}
		dma_buf_put(dbq->dma);
		dbq->dma = NULL;
	}

	dbq->state = DB_STATE_Q_UNINIT;
}
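/*
 * Accessors for the shadow timestamp page shared with the GPU side.
 * eop ("end of pipe") holds the last retired timestamp; the DMA barriers
 * order the plain loads/stores against the device's updates of the page.
 */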
static inline uint32_t get_context_retired_ts(struct hgsl_context *ctxt)
{
	unsigned int ts = ctxt->shadow_ts->eop;

	/* ensure read is done before comparison */
	dma_rmb();
	return ts;
}

static inline void set_context_retired_ts(struct hgsl_context *ctxt,
	unsigned int ts)
{
	ctxt->shadow_ts->eop = ts;
	/* ensure update is done before return */
	dma_wmb();
}

static inline bool _timestamp_retired(struct hgsl_context *ctxt,
	unsigned int timestamp)
{
	return hgsl_ts32_ge(get_context_retired_ts(ctxt), timestamp);
}
static inline void _destroy_context(struct kref *kref);
static void _signal_contexts(struct qcom_hgsl *hgsl)
{
	struct hgsl_context *ctxt;
	int i;
	uint32_t ts;

	for (i = 0; i < HGSL_CONTEXT_NUM; i++) {
		ctxt = hgsl_get_context(hgsl, i);
		if ((ctxt == NULL) || (ctxt->timeline == NULL)) {
			hgsl_put_context(ctxt);
			continue;
		}

		ts = get_context_retired_ts(ctxt);
		if (ts != ctxt->last_ts) {
			hgsl_hsync_timeline_signal(ctxt->timeline, ts);
			ctxt->last_ts = ts;
		}
		hgsl_put_context(ctxt);
	}
}
static int hgsl_init_context(struct qcom_hgsl *hgsl)
{
	int ret = 0;

	hgsl->contexts = kzalloc(sizeof(struct hgsl_context *) *
				HGSL_CONTEXT_NUM, GFP_KERNEL);
	if (!hgsl->contexts) {
		ret = -ENOMEM;
		goto out;
	}
	rwlock_init(&hgsl->ctxt_lock);

out:
	return ret;
}
static int hgsl_init_global_hyp_channel(struct qcom_hgsl *hgsl)
{
	int ret = 0;
	int rval = 0;

	ret = hgsl_hyp_init(&hgsl->global_hyp, hgsl->dev, 0, "hgsl");
	if (ret != 0)
		goto out;

	ret = hgsl_hyp_gsl_lib_open(&hgsl->global_hyp, 0, &rval);
	if (rval)
		ret = -EINVAL;
	else
		hgsl->global_hyp_inited = true;

out:
	if (ret)
		hgsl_hyp_close(&hgsl->global_hyp);
	return ret;
}
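/*
 * One-time setup of a global doorbell queue: fetch the queue info from the
 * backend, vmap its dma-buf, and hook up the doorbell (TCSR) signal. Safe
 * to call repeatedly; an already initialized queue returns immediately.
 */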
static int hgsl_dbq_init(struct qcom_hgsl *hgsl,
	uint32_t dbq_idx, uint32_t db_signal)
{
	struct doorbell_queue *dbq;
	struct dma_buf *dma_buf;
	int tcsr_idx;
	int ret;

	if ((db_signal <= DB_SIGNAL_INVALID) ||
		(db_signal > DB_SIGNAL_MAX)) {
		LOGE("Invalid db signal %d\n", db_signal);
		return -EINVAL;
	}

	if (dbq_idx >= MAX_DB_QUEUE) {
		LOGE("Invalid dbq_idx %d\n", dbq_idx);
		return -EINVAL;
	}

	if ((dbq_idx == GLB_DB_DEST_TS_RETIRE_IRQ_ID) && (db_signal != DB_SIGNAL_LOCAL)) {
		LOGE("TCSR send and receive irq bit conflict %d, %d", dbq_idx, db_signal);
		return -EINVAL;
	}

	dbq = &hgsl->dbq[dbq_idx];
	mutex_lock(&dbq->lock);
	if (dbq->state == DB_STATE_Q_INIT_DONE) {
		mutex_unlock(&dbq->lock);
		return 0;
	}

	ret = hgsl_hyp_get_dbq_info(&hgsl->global_hyp, dbq_idx,
			&hgsl->dbq_info[dbq_idx]);
	if (ret) {
		LOGE("Failed to get dbq info %d\n", ret);
		goto err;
	}

	dma_buf = hgsl->dbq_info[dbq_idx].dma_buf;
	dbq->state = DB_STATE_Q_FAULT;
	dbq->dma = dma_buf;
	dbq->dbq_idx = dbq_idx;
	dbq->gmuaddr = hgsl->dbq_info[dbq_idx].gmuaddr;
	dbq->ibdesc_max_size = hgsl->dbq_info[dbq_idx].ibdesc_max_size;
	atomic_set(&dbq->seq_num, 0);

	dma_buf_begin_cpu_access(dbq->dma, DMA_BIDIRECTIONAL);
	ret = dma_buf_vmap(dbq->dma, &dbq->map);
	if (ret)
		goto err;
	dbq->vbase = dbq->map.vaddr;

	dbq->data.vaddr = (uint32_t *)dbq->vbase +
			hgsl->dbq_info[dbq_idx].queue_off_dwords;
	dbq->data.dwords = hgsl->dbq_info[dbq_idx].queue_dwords;

	tcsr_idx = (db_signal != DB_SIGNAL_LOCAL) ?
			db_signal - DB_SIGNAL_GLOBAL_0 : -1;
	ret = hgsl_init_db_signal(hgsl, tcsr_idx);
	if (ret != 0) {
		LOGE("failed to init dbq signal %d, idx %d",
			db_signal, dbq_idx);
		goto err;
	}
	dbq->tcsr_idx = tcsr_idx;

	dbq->state = DB_STATE_Q_INIT_DONE;
	mutex_unlock(&dbq->lock);
	return 0;

err:
	hgsl_reset_dbq(dbq);
	mutex_unlock(&dbq->lock);
	return ret;
}
static void _cleanup_shadow(struct hgsl_hab_channel_t *hab_channel,
	struct hgsl_context *ctxt)
{
	struct hgsl_mem_node *mem_node = ctxt->shadow_ts_node;

	if (!mem_node)
		return;

	if (mem_node->dma_buf) {
		if (ctxt->shadow_ts) {
			dma_buf_vunmap(mem_node->dma_buf, &ctxt->map);
			ctxt->shadow_ts = NULL;
		}
		dma_buf_end_cpu_access(mem_node->dma_buf, DMA_FROM_DEVICE);
	}

	if (ctxt->is_fe_shadow) {
		hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
		hgsl_sharedmem_free(mem_node);
	} else {
		hgsl_hyp_put_shadowts_mem(hab_channel, mem_node);
		kfree(mem_node);
	}

	ctxt->shadow_ts_flags = 0;
	ctxt->is_fe_shadow = false;
	ctxt->shadow_ts_node = NULL;
}
static inline void _destroy_context(struct kref *kref)
{
	struct hgsl_context *ctxt =
		container_of(kref, struct hgsl_context, kref);
	struct doorbell_queue *dbq = ctxt->dbq;

	LOGD("%d", ctxt->context_id);

	if (ctxt->timeline) {
		hgsl_hsync_timeline_fini(ctxt);
		hgsl_hsync_timeline_put(ctxt->timeline);
	}

	if (dbq != NULL) {
		hgsl_dbq_set_state_info((uint32_t *)dbq->vbase,
			HGSL_DBQ_METADATA_CONTEXT_INFO,
			ctxt->context_id,
			HGSL_DBQ_CONTEXT_DESTROY_OFFSET_IN_DWORD,
			1);
	}

	/* ensure the dbq metadata update is done */
	dma_wmb();
	ctxt->destroyed = true;
}
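/*
 * Context lookup helpers. hgsl_get_context() returns the context with a
 * kref held under ctxt_lock; every successful lookup must be paired with
 * hgsl_put_context(), which drops the kref and runs _destroy_context()
 * on the final put. The _owner variants additionally verify that the
 * context belongs to the calling file instance.
 */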
static struct hgsl_context *hgsl_get_context(struct qcom_hgsl *hgsl,
	uint32_t context_id)
{
	struct hgsl_context *ctxt = NULL;

	if (context_id < HGSL_CONTEXT_NUM) {
		read_lock(&hgsl->ctxt_lock);
		ctxt = hgsl->contexts[context_id];
		if (ctxt)
			kref_get(&ctxt->kref);
		read_unlock(&hgsl->ctxt_lock);
	}

	return ctxt;
}

static struct hgsl_context *hgsl_get_context_owner(struct hgsl_priv *priv,
	uint32_t context_id)
{
	struct hgsl_context *ctxt = NULL;
	struct qcom_hgsl *hgsl = priv->dev;

	ctxt = hgsl_get_context(hgsl, context_id);
	if ((ctxt != NULL) && (ctxt->priv != priv)) {
		hgsl_put_context(ctxt);
		ctxt = NULL;
	}
	return ctxt;
}

static struct hgsl_context *hgsl_remove_context(struct hgsl_priv *priv,
	uint32_t context_id)
{
	struct hgsl_context *ctxt = NULL;
	struct qcom_hgsl *hgsl = priv->dev;

	if (context_id < HGSL_CONTEXT_NUM) {
		write_lock(&hgsl->ctxt_lock);
		ctxt = hgsl->contexts[context_id];
		if ((ctxt != NULL) && (ctxt->priv == priv))
			hgsl->contexts[context_id] = NULL;
		else
			ctxt = NULL;
		write_unlock(&hgsl->ctxt_lock);
	}

	return ctxt;
}

static int hgsl_check_context_owner(struct hgsl_priv *priv,
	uint32_t context_id)
{
	struct hgsl_context *ctxt = hgsl_get_context_owner(priv, context_id);
	int ret = -EINVAL;

	if (ctxt) {
		hgsl_put_context(ctxt);
		ret = 0;
	}

	return ret;
}

static void hgsl_put_context(struct hgsl_context *ctxt)
{
	if (ctxt)
		kref_put(&ctxt->kref, _destroy_context);
}
static int hgsl_read_shadow_timestamp(struct hgsl_context *ctxt,
	enum gsl_timestamp_type_t type,
	uint32_t *timestamp)
{
	int ret = -EINVAL;

	if (ctxt && ctxt->shadow_ts) {
		switch (type) {
		case GSL_TIMESTAMP_RETIRED:
			*timestamp = ctxt->shadow_ts->eop;
			ret = 0;
			break;
		case GSL_TIMESTAMP_CONSUMED:
			*timestamp = ctxt->shadow_ts->sop;
			ret = 0;
			break;
		case GSL_TIMESTAMP_QUEUED:
			/* todo */
			break;
		default:
			break;
		}
		/* ensure read is done before return */
		dma_rmb();
		LOGD("%d, %u, %u, %u", ret, ctxt->context_id, type, *timestamp);
	}

	return ret;
}

static int hgsl_check_shadow_timestamp(struct hgsl_context *ctxt,
	enum gsl_timestamp_type_t type,
	uint32_t timestamp, bool *expired)
{
	uint32_t ts_read = 0;
	int ret = hgsl_read_shadow_timestamp(ctxt, type, &ts_read);

	if (!ret)
		*expired = hgsl_ts32_ge(ts_read, timestamp);

	return ret;
}
static void hgsl_get_shadowts_mem(struct hgsl_hab_channel_t *hab_channel,
	struct hgsl_context *ctxt)
{
	struct dma_buf *dma_buf = NULL;
	int ret = 0;

	if (ctxt->shadow_ts_node)
		return;

	ctxt->shadow_ts_node = hgsl_zalloc(sizeof(*ctxt->shadow_ts_node));
	if (ctxt->shadow_ts_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hgsl_hyp_get_shadowts_mem(hab_channel, ctxt->context_id,
			&ctxt->shadow_ts_flags, ctxt->shadow_ts_node);
	if (ret)
		goto out;

	dma_buf = ctxt->shadow_ts_node->dma_buf;
	if (dma_buf) {
		dma_buf_begin_cpu_access(dma_buf, DMA_FROM_DEVICE);
		ret = dma_buf_vmap(dma_buf, &ctxt->map);
		if (ret)
			goto out;
		ctxt->shadow_ts = (struct shadow_ts *)ctxt->map.vaddr;
	}
	LOGD("0x%llx, 0x%llx", (uint64_t)ctxt, (uint64_t)ctxt->map.vaddr);

out:
	if (ret)
		_cleanup_shadow(hab_channel, ctxt);
}
static int hgsl_ioctl_get_shadowts_mem(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_get_shadowts_mem_params params;
	struct hgsl_context *ctxt = NULL;
	struct dma_buf *dma_buf = NULL;
	int ret = 0;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	ctxt = hgsl_get_context_owner(priv, params.ctxthandle);
	if (ctxt == NULL) {
		ret = -EINVAL;
		goto out;
	}
	if (!ctxt->shadow_ts_node) {
		ret = -ENODEV;
		goto out;
	}

	params.flags = ctxt->shadow_ts_flags;
	params.size = ctxt->shadow_ts_node->memdesc.size64;
	params.fd = -1;

	dma_buf = ctxt->shadow_ts_node->dma_buf;
	if (dma_buf) {
		/* increase the reference count before installing the fd */
		get_dma_buf(dma_buf);
		params.fd = dma_buf_fd(dma_buf, O_CLOEXEC);
		if (params.fd < 0) {
			LOGE("dma buf to fd failed\n");
			ret = -ENOMEM;
			dma_buf_put(dma_buf);
			goto out;
		}
	}

	if (copy_to_user(USRPTR(arg), &params, sizeof(params)))
		ret = -EFAULT;

out:
	hgsl_put_context(ctxt);
	return ret;
}
static int hgsl_ioctl_put_shadowts_mem(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_put_shadowts_mem_params params;
	int ret = 0;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	ret = hgsl_check_context_owner(priv, params.ctxthandle);
	/* return OK and keep the shadow ts until the context is destroyed */

out:
	return ret;
}
static bool dbq_check_ibdesc_state(struct qcom_hgsl *hgsl,
	struct hgsl_context *ctxt, uint32_t request_type)
{
	struct doorbell_queue *dbq = ctxt->dbq;
	bool wait_required = false;

	if (dbq == NULL || !dbq->ibdesc_priv.buf_inuse)
		return wait_required;

	if (request_type == HGSL_DBQ_IBDESC_REQUEST_RELEASE) {
		if (ctxt->context_id == dbq->ibdesc_priv.context_id)
			wait_required = true;
	} else if (request_type == HGSL_DBQ_IBDESC_REQUEST_ACQUIRE) {
		wait_required = true;
	}

	return wait_required;
}
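/*
 * Wait until the shared indirect IB descriptor buffer is free, by polling
 * the owning context's retired timestamp. Called with dbq->lock held; the
 * lock is dropped around each sleep so submissions on other contexts can
 * make progress. Returns -EAGAIN if the buffer is still busy at timeout.
 */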
static int dbq_wait_free_ibdesc(struct qcom_hgsl *hgsl,
	struct hgsl_context *context, uint32_t request_type,
	uint32_t wait_type)
{
	struct hgsl_context *ctxt = NULL;
	struct doorbell_queue *dbq = context->dbq;
	signed long start;
	bool expired = false;
	int timeout = 0;
	int ret = 0;

	if (!dbq_check_ibdesc_state(hgsl, context, request_type))
		return 0;

	ctxt = hgsl_get_context(hgsl, dbq->ibdesc_priv.context_id);
	if (!ctxt) {
		LOGE("Invalid context id %d\n", dbq->ibdesc_priv.context_id);
		return -EINVAL;
	}

	if (wait_type == HGSL_DBQ_IBDESC_SHORT_WAIT)
		timeout = msecs_to_jiffies(HGSL_DBQ_IBDESC_SHORT_WAIT_MSEC);
	else if (wait_type == HGSL_DBQ_IBDESC_LONG_WAIT)
		timeout = msecs_to_jiffies(HGSL_DBQ_IBDESC_LONG_WAIT_MSEC);

	start = jiffies;
	do {
		ret = hgsl_check_shadow_timestamp(ctxt, GSL_TIMESTAMP_RETIRED,
				dbq->ibdesc_priv.timestamp, &expired);
		if (ret || expired)
			break;

		mutex_unlock(&dbq->lock);
		if (msleep_interruptible(1))
			ret = -EINTR;
		mutex_lock(&dbq->lock);
		if (ret == -EINTR)
			break;
	} while ((jiffies - start) < timeout);

	if (expired) {
		dbq->ibdesc_priv.buf_inuse = false;
	} else {
		if (ret && ret != -EINTR && ret != -EAGAIN)
			LOGE("Wait to free ibdesc failed %d", ret);
		if (!ret)
			ret = -EAGAIN;
	}

	hgsl_put_context(ctxt);
	return ret;
}
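/*
 * Claim the per-context indirect IB buffer for timestamp @ts. If a prior
 * indirect submission has not retired yet, wait for it, either through the
 * local shadow timestamp or through the hypervisor when shadow timestamps
 * are unavailable. A timeout is mapped to -EAGAIN so callers can retry.
 */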
static int dbcq_get_free_indirect_ib_buffer(struct hgsl_priv *priv,
	struct hgsl_context *ctxt,
	uint32_t ts, uint32_t timeout_in_ms)
{
	int ret = 0;
	struct qcom_hgsl *hgsl = priv->dev;
	struct doorbell_context_queue *dbcq = ctxt->dbcq;
	struct hgsl_wait_ts_info wait_ts_info = { 0 };
	bool expired = false;

	if (dbcq->indirect_ib_ts != 0x0U) {
		ret = hgsl_check_shadow_timestamp(ctxt, GSL_TIMESTAMP_RETIRED,
				dbcq->indirect_ib_ts, &expired);
		if (!ret && expired) {
			/* already retired, go out to set indirect_ib_ts
			 * to claim the buffer
			 */
			goto out;
		}

		/* Populate the hgsl structure parameters */
		wait_ts_info.devhandle = ctxt->devhandle;
		wait_ts_info.context_id = ctxt->context_id;
		wait_ts_info.timestamp = dbcq->indirect_ib_ts;
		wait_ts_info.timeout = timeout_in_ms;

		if (ret)
			ret = hgsl_hyp_wait_timestamp(&priv->hyp_priv, &wait_ts_info);
		else if (timeout_in_ms != 0)
			ret = hgsl_wait_timestamp(hgsl, &wait_ts_info);

		if (ret) {
			if (ret == -ETIMEDOUT) {
				LOGI("Timed out waiting for indirect submission buffer %d", ret);
				ret = -EAGAIN;
			}
			return ret;
		}
	}

out:
	dbcq->indirect_ib_ts = ts;
	return ret;
}
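/*
 * Attach a doorbell queue to a new context: prefer a per-context queue
 * (DBCQ) if the backend supports it, otherwise fall back to one of the
 * global queues described by @dbq_info (high 16 bits: queue index, low
 * 16 bits: doorbell signal).
 */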
static int hgsl_ctxt_create_dbq(struct hgsl_priv *priv,
	struct hgsl_hab_channel_t *hab_channel,
	struct hgsl_context *ctxt, uint32_t dbq_info, bool dbq_info_checked)
{
	struct qcom_hgsl *hgsl = priv->dev;
	uint32_t dbq_idx;
	uint32_t db_signal;
	uint32_t queue_gmuaddr;
	uint32_t irq_idx;
	int ret;

	/* if the backend supports the latest per-context dbq, use DBCQ */
	ret = hgsl_hyp_query_dbcq(hab_channel, ctxt->devhandle, ctxt->context_id,
			HGSL_CTXT_QUEUE_TOTAL_SIZE, &db_signal, &queue_gmuaddr, &irq_idx);
	if (!ret) {
		hgsl_dbcq_init(priv, ctxt, db_signal, queue_gmuaddr, irq_idx);
		return 0;
	}

	/* otherwise, it may support RPC_CONTEXT_CREATE v1; if a valid
	 * dbq_info was already returned, skip the query
	 */
	if (!dbq_info_checked) {
		ret = hgsl_hyp_dbq_create(hab_channel,
				ctxt->context_id, &dbq_info);
		if (ret)
			return ret;
	}

	if (dbq_info == -1)
		return -EINVAL;

	dbq_idx = dbq_info >> 16;
	db_signal = dbq_info & 0xFFFF;
	ret = hgsl_dbq_init(hgsl, dbq_idx, db_signal);
	if (ret)
		return ret;

	ctxt->dbq = &hgsl->dbq[dbq_idx];
	ctxt->tcsr_idx = ctxt->dbq->tcsr_idx;
	hgsl_dbq_set_state_info(ctxt->dbq->vbase,
			HGSL_DBQ_METADATA_CONTEXT_INFO,
			ctxt->context_id,
			HGSL_DBQ_CONTEXT_DESTROY_OFFSET_IN_DWORD,
			0);
	return 0;
}
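/*
 * Tear down a context: release any indirect IB buffer it holds, detach it
 * from the context table, wake all waiters, spin until the final kref put
 * has run _destroy_context(), then free the shadow memory and notify the
 * backend. @hab_channel may be NULL, in which case one is borrowed from
 * the channel pool for the duration of the call.
 */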
static int hgsl_ctxt_destroy(struct hgsl_priv *priv,
	struct hgsl_hab_channel_t *hab_channel,
	uint32_t context_id, uint32_t *rval, bool can_retry)
{
	struct hgsl_context *ctxt = NULL;
	int ret;
	bool put_channel = false;
	struct doorbell_queue *dbq = NULL;

	ctxt = hgsl_get_context(priv->dev, context_id);
	if (!ctxt) {
		LOGE("Invalid context id %d\n", context_id);
		ret = -EINVAL;
		goto out;
	}

	dbq = ctxt->dbq;
	if (dbq != NULL) {
		mutex_lock(&dbq->lock);
		/* if the ibdesc is held by this context, release it here */
		ret = dbq_wait_free_ibdesc(priv->dev, ctxt,
				HGSL_DBQ_IBDESC_REQUEST_RELEASE,
				HGSL_DBQ_IBDESC_LONG_WAIT);
		if (ret && !can_retry)
			dbq->ibdesc_priv.buf_inuse = false;
		mutex_unlock(&dbq->lock);
		if (ret && can_retry) {
			hgsl_put_context(ctxt);
			goto out;
		}
	}
	hgsl_put_context(ctxt);

	ctxt = hgsl_remove_context(priv, context_id);
	if (!ctxt) {
		LOGE("Invalid context id %d\n", context_id);
		ret = -EINVAL;
		goto out;
	}

	/* unblock all threads waiting on this context */
	ctxt->in_destroy = true;
	wake_up_all(&ctxt->wait_q);
	hgsl_put_context(ctxt);

	while (!ctxt->destroyed)
		cpu_relax();

	if (!hab_channel) {
		ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
		if (ret) {
			LOGE("Failed to get hab channel %d", ret);
			hgsl_free(ctxt);
			goto out;
		}
		put_channel = true;
	}

	if (!ctxt->is_fe_shadow)
		_cleanup_shadow(hab_channel, ctxt);

	ret = hgsl_hyp_ctxt_destroy(hab_channel,
		ctxt->devhandle, ctxt->context_id, rval, ctxt->dbcq_export_id);

	hgsl_dbcq_close(ctxt);

	if (ctxt->is_fe_shadow)
		_cleanup_shadow(hab_channel, ctxt);
	hgsl_free(ctxt);

out:
	if (put_channel)
		hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static inline bool hgsl_ctxt_use_global_dbq(struct hgsl_context *ctxt)
{
	return ((ctxt != NULL) &&
		(ctxt->shadow_ts != NULL) &&
		((ctxt->dbq != NULL) || (ctxt->dbcq != NULL)) &&
		(is_global_db(ctxt->tcsr_idx)));
}

static inline bool hgsl_ctxt_use_dbq(struct hgsl_context *ctxt)
{
	return ((ctxt != NULL) &&
		((ctxt->dbq != NULL) || (ctxt->dbcq != NULL)) &&
		((ctxt->shadow_ts != NULL) || (!is_global_db(ctxt->tcsr_idx))));
}
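/*
 * GSL context creation ioctl. Tries the v1 RPC first (which can return the
 * dbq info inline) and falls back to the legacy create call. On failure
 * after partial construction, the context is unwound through the same
 * paths used at destroy time.
 */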
static int hgsl_ioctl_ctxt_create(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_ioctl_ctxt_create_params params;
	struct hgsl_context *ctxt = NULL;
	int ret = 0;
	struct hgsl_hab_channel_t *hab_channel = NULL;
	bool ctxt_created = false;
	bool dbq_off = (!hgsl->global_hyp_inited || hgsl->db_off);
	uint32_t dbq_info = -1;
	bool dbq_info_checked = false;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		return ret;
	}

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	ctxt = hgsl_zalloc(sizeof(*ctxt));
	if (ctxt == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	if (params.flags & GSL_CONTEXT_FLAG_CLIENT_GENERATED_TS)
		params.flags |= GSL_CONTEXT_FLAG_USER_GENERATED_TS;
	if (params.flags & GSL_CONTEXT_FLAG_BIND) {
		params.flags &= ~GSL_CONTEXT_FLAG_CLIENT_GENERATED_TS;
		params.flags |= GSL_CONTEXT_FLAG_USER_GENERATED_TS;
	}

	ret = hgsl_hyp_ctxt_create_v1(hgsl->dev, priv, hab_channel,
			ctxt, &params, dbq_off, &dbq_info);
	if (ret) {
		/* fall back to legacy mode */
		ret = hgsl_hyp_ctxt_create(hab_channel, &params);
		if (ret)
			goto out;

		if (params.ctxthandle >= HGSL_CONTEXT_NUM) {
			LOGE("invalid ctxt id %d", params.ctxthandle);
			ret = -EINVAL;
			goto out;
		}
		ctxt->context_id = params.ctxthandle;
		ctxt->devhandle = params.devhandle;
		ctxt->pid = priv->pid;
		ctxt->priv = priv;
		ctxt->flags = params.flags;
	} else {
		dbq_info_checked = true;
	}

	kref_init(&ctxt->kref);
	init_waitqueue_head(&ctxt->wait_q);
	mutex_init(&ctxt->lock);

	hgsl_get_shadowts_mem(hab_channel, ctxt);

	if (!dbq_off)
		hgsl_ctxt_create_dbq(priv, hab_channel, ctxt, dbq_info, dbq_info_checked);

	if (hgsl_ctxt_use_global_dbq(ctxt)) {
		ret = hgsl_hsync_timeline_create(ctxt);
		if (ret < 0)
			LOGE("hsync timeline failed for context %d", params.ctxthandle);
	}

	if (ctxt->timeline)
		params.sync_type = HGSL_SYNC_TYPE_HSYNC;
	else
		params.sync_type = HGSL_SYNC_TYPE_ISYNC;

	write_lock(&hgsl->ctxt_lock);
	if (hgsl->contexts[ctxt->context_id] != NULL) {
		LOGE("context id %d already created",
			ctxt->context_id);
		ret = -EBUSY;
		write_unlock(&hgsl->ctxt_lock);
		goto out;
	}
	hgsl->contexts[ctxt->context_id] = ctxt;
	write_unlock(&hgsl->ctxt_lock);
	ctxt_created = true;

	if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
		ret = -EFAULT;
		goto out;
	}

out:
	LOGD("%d", params.ctxthandle);
	if (ret) {
		if (ctxt_created) {
			hgsl_ctxt_destroy(priv, hab_channel, params.ctxthandle, NULL, false);
		} else if (ctxt && (params.ctxthandle < HGSL_CONTEXT_NUM)) {
			if (!ctxt->is_fe_shadow)
				_cleanup_shadow(hab_channel, ctxt);
			hgsl_hyp_ctxt_destroy(hab_channel, ctxt->devhandle, ctxt->context_id,
					NULL, ctxt->dbcq_export_id);
			hgsl_dbcq_close(ctxt);
			if (ctxt->is_fe_shadow)
				_cleanup_shadow(hab_channel, ctxt);
			hgsl_free(ctxt);
		}
		LOGE("failed to create context");
	}
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static int hgsl_ioctl_ctxt_destroy(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_ctxt_destroy_params params;
	struct hgsl_hab_channel_t *hab_channel = NULL;
	int ret;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user\n");
		ret = -EFAULT;
		goto out;
	}

	ret = hgsl_ctxt_destroy(priv, hab_channel, params.ctxthandle, &params.rval, true);
	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &params, sizeof(params)))
			ret = -EFAULT;
	}

out:
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
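/*
 * Block until @param->timestamp retires on the context's shadow timestamp
 * or the timeout elapses. Only valid for contexts on a global doorbell
 * with shadow timestamps (-EPERM otherwise, so callers can fall back to a
 * hypervisor-side wait). The wait is registered on active_wait_list,
 * presumably so the doorbell signal path can find and wake it.
 */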
static int hgsl_wait_timestamp(struct qcom_hgsl *hgsl,
	struct hgsl_wait_ts_info *param)
{
	struct hgsl_active_wait *wait = NULL;
	struct hgsl_context *ctxt = hgsl_get_context(hgsl, param->context_id);
	unsigned int timestamp;
	int ret;

	if (ctxt == NULL) {
		LOGE("Invalid context id %d\n", param->context_id);
		ret = -EINVAL;
		goto out;
	}

	if (!hgsl_ctxt_use_global_dbq(ctxt)) {
		ret = -EPERM;
		goto out;
	}
	timestamp = param->timestamp;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		ret = -ENOMEM;
		goto out;
	}
	wait->ctxt = ctxt;
	wait->timestamp = timestamp;

	spin_lock(&hgsl->active_wait_lock);
	list_add_tail(&wait->head, &hgsl->active_wait_list);
	spin_unlock(&hgsl->active_wait_lock);

	ret = wait_event_interruptible_timeout(ctxt->wait_q,
			_timestamp_retired(ctxt, timestamp) ||
			ctxt->in_destroy,
			msecs_to_jiffies(param->timeout));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret == -ERESTARTSYS)
		/* Let the user handle this */
		ret = -EINTR;
	else
		ret = 0;

	spin_lock(&hgsl->active_wait_lock);
	list_del(&wait->head);
	spin_unlock(&hgsl->active_wait_lock);

out:
	hgsl_put_context(ctxt);
	kfree(wait);
	return ret;
}
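/*
 * Pass-through ioctl: marshals up to HGSL_HYP_GENERAL_MAX_SEND_NUM send
 * buffers and HGSL_HYP_GENERAL_MAX_REPLY_NUM reply buffers between user
 * space and the hypervisor RPC layer, with per-buffer size validation.
 */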
static int hgsl_ioctl_hyp_generic_transaction(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_hyp_generic_transaction_params params;
	void *pSend[HGSL_HYP_GENERAL_MAX_SEND_NUM];
	void *pReply[HGSL_HYP_GENERAL_MAX_REPLY_NUM];
	unsigned int i = 0;
	int ret = 0;
	int ret_value = 0;
	int *pRval = NULL;

	memset(pSend, 0, sizeof(pSend));
	memset(pReply, 0, sizeof(pReply));

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user\n");
		ret = -EFAULT;
		goto out;
	}

	if ((params.send_num > HGSL_HYP_GENERAL_MAX_SEND_NUM) ||
		(params.reply_num > HGSL_HYP_GENERAL_MAX_REPLY_NUM)) {
		ret = -EINVAL;
		LOGE("invalid Send %d or reply %d number\n",
			params.send_num, params.reply_num);
		goto out;
	}

	for (i = 0; i < params.send_num; i++) {
		if ((params.send_size[i] > HGSL_HYP_GENERAL_MAX_SIZE) ||
			(params.send_size[i] == 0)) {
			LOGE("Invalid size 0x%x for %d\n", params.send_size[i], i);
			ret = -EINVAL;
			goto out;
		}
		pSend[i] = hgsl_malloc(params.send_size[i]);
		if (pSend[i] == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		if (copy_from_user(pSend[i],
				USRPTR(params.send_data[i]),
				params.send_size[i])) {
			LOGE("Failed to copy send data %d\n", i);
			ret = -EFAULT;
			goto out;
		}
	}

	for (i = 0; i < params.reply_num; i++) {
		if ((params.reply_size[i] > HGSL_HYP_GENERAL_MAX_SIZE) ||
			(params.reply_size[i] == 0)) {
			ret = -EINVAL;
			goto out;
		}
		pReply[i] = hgsl_malloc(params.reply_size[i]);
		if (pReply[i] == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		memset(pReply[i], 0, params.reply_size[i]);
	}

	if (params.ret_value)
		pRval = &ret_value;

	ret = hgsl_hyp_generic_transaction(&priv->hyp_priv,
			&params, pSend, pReply, pRval);
	if (ret == 0) {
		for (i = 0; i < params.reply_num; i++) {
			if (copy_to_user(USRPTR(params.reply_data[i]),
					pReply[i], params.reply_size[i])) {
				ret = -EFAULT;
				goto out;
			}
		}
		if (params.ret_value) {
			if (copy_to_user(USRPTR(params.ret_value),
					&ret_value, sizeof(ret_value)))
				ret = -EFAULT;
		}
	}

out:
	for (i = 0; i < HGSL_HYP_GENERAL_MAX_SEND_NUM; i++)
		hgsl_free(pSend[i]);
	for (i = 0; i < HGSL_HYP_GENERAL_MAX_REPLY_NUM; i++)
		hgsl_free(pReply[i]);
	return ret;
}
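/*
 * Allocate GPU memory on behalf of user space: carve the buffer locally,
 * map it into the GPU SMMU through the backend, then hand user space a
 * dma-buf fd plus the resulting memdesc. The node is tracked on
 * priv->mem_allocated for the later free/metainfo/cache ioctls.
 */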
static int hgsl_ioctl_mem_alloc(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_mem_alloc_params params;
	struct qcom_hgsl *hgsl = priv->dev;
	int ret = 0;
	struct hgsl_mem_node *mem_node = NULL;
	struct hgsl_hab_channel_t *hab_channel = NULL;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	if (params.sizebytes == 0) {
		LOGE("requested size is 0");
		ret = -EINVAL;
		goto out;
	}

	mem_node = hgsl_mem_node_zalloc(hgsl->default_iocoherency);
	if (mem_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* let the back end know that this is an HGSL allocation */
	params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK;
	params.flags |= GSL_MEMFLAGS_USERMEM_HGSL_ALLOC;
	mem_node->flags = params.flags;

	ret = hgsl_sharedmem_alloc(hgsl->dev, params.sizebytes, params.flags, mem_node);
	if (ret)
		goto out;

	ret = hgsl_hyp_mem_map_smmu(hab_channel, mem_node->memdesc.size, 0, mem_node);
	LOGD("%d, %d, gpuaddr 0x%llx",
		ret, mem_node->export_id, mem_node->memdesc.gpuaddr);
	if (ret)
		goto out;

	/* increase the reference count before installing the fd */
	get_dma_buf(mem_node->dma_buf);
	params.fd = dma_buf_fd(mem_node->dma_buf, O_CLOEXEC);
	if (params.fd < 0) {
		LOGE("dma_buf_fd failed, size 0x%x", mem_node->memdesc.size);
		ret = -EINVAL;
		dma_buf_put(mem_node->dma_buf);
		goto out;
	}

	if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
		ret = -EFAULT;
		goto out;
	}
	if (copy_to_user(USRPTR(params.memdesc),
			&mem_node->memdesc, sizeof(mem_node->memdesc))) {
		ret = -EFAULT;
		goto out;
	}

	mutex_lock(&priv->lock);
	list_add(&mem_node->node, &priv->mem_allocated);
	hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
	mutex_unlock(&priv->lock);

out:
	if (ret && mem_node) {
		hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
		hgsl_sharedmem_free(mem_node);
	}
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static int hgsl_ioctl_mem_free(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_mem_free_params params;
	struct gsl_memdesc_t memdesc;
	int ret = 0;
	struct hgsl_mem_node *node_found = NULL;
	struct hgsl_mem_node *tmp = NULL;
	struct hgsl_hab_channel_t *hab_channel = NULL;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}
	if (copy_from_user(&memdesc, USRPTR(params.memdesc),
			sizeof(memdesc))) {
		LOGE("failed to copy memdesc from user");
		ret = -EFAULT;
		goto out;
	}

	mutex_lock(&priv->lock);
	list_for_each_entry(tmp, &priv->mem_allocated, node) {
		if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr)
			&& (tmp->memdesc.size == memdesc.size)) {
			node_found = tmp;
			list_del(&node_found->node);
			break;
		}
	}
	mutex_unlock(&priv->lock);

	if (node_found) {
		ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
		if (!ret) {
			hgsl_trace_gpu_mem_total(priv,
				-(node_found->memdesc.size64));
			hgsl_sharedmem_free(node_found);
		} else {
			LOGE("hgsl_hyp_mem_unmap_smmu failed %d", ret);
			mutex_lock(&priv->lock);
			list_add(&node_found->node, &priv->mem_allocated);
			mutex_unlock(&priv->lock);
		}
	} else {
		LOGE("can't find the memory 0x%llx, 0x%x",
			memdesc.gpuaddr, memdesc.size);
		ret = -EINVAL;
	}

out:
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static int hgsl_ioctl_set_metainfo(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_set_metainfo_params params;
	int ret = 0;
	struct hgsl_mem_node *mem_node = NULL;
	struct hgsl_mem_node *tmp = NULL;
	char metainfo[HGSL_MEM_META_MAX_SIZE] = {0};

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	if (params.metainfo_len > HGSL_MEM_META_MAX_SIZE) {
		LOGE("metainfo_len %d exceeded max", params.metainfo_len);
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(metainfo, USRPTR(params.metainfo),
			params.metainfo_len)) {
		LOGE("failed to copy metainfo from user");
		ret = -EFAULT;
		goto out;
	}
	metainfo[HGSL_MEM_META_MAX_SIZE - 1] = '\0';

	mutex_lock(&priv->lock);
	list_for_each_entry(tmp, &priv->mem_allocated, node) {
		if (tmp->memdesc.priv64 == params.memdesc_priv) {
			mem_node = tmp;
			break;
		}
	}
	if (mem_node) {
		strscpy(mem_node->metainfo, metainfo,
			sizeof(mem_node->metainfo));
	}
	mutex_unlock(&priv->lock);

	if (!mem_node) {
		LOGE("Failed to find the requested memory");
		ret = -EINVAL;
		goto out;
	}

	ret = hgsl_hyp_set_metainfo(&priv->hyp_priv, &params, metainfo);

out:
	return ret;
}
static int hgsl_ioctl_mem_map_smmu(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_ioctl_mem_map_smmu_params params;
	int ret = 0;
	struct hgsl_mem_node *mem_node = NULL;
	struct hgsl_hab_channel_t *hab_channel = NULL;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	mem_node = hgsl_mem_node_zalloc(hgsl->default_iocoherency);
	if (mem_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	params.size = PAGE_ALIGN(params.size);
	params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK;
	mem_node->flags = params.flags;
	mem_node->fd = params.fd;
	mem_node->memtype = params.memtype;

	ret = hgsl_hyp_mem_map_smmu(hab_channel, params.size, params.offset, mem_node);
	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
			ret = -EFAULT;
			goto out;
		}
		if (copy_to_user(USRPTR(params.memdesc), &mem_node->memdesc,
				sizeof(mem_node->memdesc))) {
			ret = -EFAULT;
			goto out;
		}
		mutex_lock(&priv->lock);
		list_add(&mem_node->node, &priv->mem_mapped);
		hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
		mutex_unlock(&priv->lock);
	}

out:
	if (ret) {
		hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
		hgsl_free(mem_node);
	}
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static int hgsl_ioctl_mem_unmap_smmu(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_mem_unmap_smmu_params params;
	int ret = 0;
	struct hgsl_mem_node *node_found = NULL;
	struct hgsl_mem_node *tmp = NULL;
	struct hgsl_hab_channel_t *hab_channel = NULL;

	ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
	if (ret) {
		LOGE("Failed to get hab channel %d", ret);
		goto out;
	}

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	mutex_lock(&priv->lock);
	list_for_each_entry(tmp, &priv->mem_mapped, node) {
		if ((tmp->memdesc.gpuaddr == params.gpuaddr)
			&& (tmp->memdesc.size == params.size)) {
			node_found = tmp;
			list_del(&node_found->node);
			break;
		}
	}
	mutex_unlock(&priv->lock);

	if (node_found) {
		hgsl_put_sgt(node_found, false);
		ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
		if (ret) {
			mutex_lock(&priv->lock);
			list_add(&node_found->node, &priv->mem_mapped);
			mutex_unlock(&priv->lock);
		} else {
			hgsl_trace_gpu_mem_total(priv,
				-(node_found->memdesc.size64));
			hgsl_free(node_found);
		}
	} else {
		ret = -EINVAL;
	}

out:
	hgsl_hyp_channel_pool_put(hab_channel);
	return ret;
}
static int hgsl_ioctl_mem_cache_operation(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_mem_cache_operation_params params;
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_mem_node *node_found = NULL;
	int ret = 0;
	uint64_t gpuaddr = 0;
	bool internal = false;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	gpuaddr = params.gpuaddr + params.offsetbytes;
	if ((gpuaddr < params.gpuaddr) || ((gpuaddr + params.sizebytes) <= gpuaddr)) {
		ret = -EINVAL;
		goto out;
	}

	mutex_lock(&priv->lock);
	node_found = hgsl_mem_find_base_locked(&priv->mem_allocated,
			gpuaddr, params.sizebytes);
	if (node_found) {
		internal = true;
	} else {
		node_found = hgsl_mem_find_base_locked(&priv->mem_mapped,
				gpuaddr, params.sizebytes);
		if (!node_found) {
			LOGE("failed to find node %d", ret);
			ret = -EINVAL;
			mutex_unlock(&priv->lock);
			goto out;
		}
	}

	ret = hgsl_mem_cache_op(hgsl->dev, node_found, internal,
		gpuaddr - node_found->memdesc.gpuaddr, params.sizebytes, params.operation);
	mutex_unlock(&priv->lock);

out:
	if (ret)
		LOGE("ret %d", ret);
	return ret;
}
static int hgsl_ioctl_mem_get_fd(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_mem_get_fd_params params;
	struct gsl_memdesc_t memdesc;
	struct hgsl_mem_node *node_found = NULL;
	struct hgsl_mem_node *tmp = NULL;
	int ret = 0;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}
	if (copy_from_user(&memdesc, USRPTR(params.memdesc),
			sizeof(memdesc))) {
		LOGE("failed to copy memdesc from user");
		ret = -EFAULT;
		goto out;
	}

	mutex_lock(&priv->lock);
	list_for_each_entry(tmp, &priv->mem_allocated, node) {
		if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr)
			&& (tmp->memdesc.size == memdesc.size)) {
			node_found = tmp;
			break;
		}
	}

	params.fd = -1;
	if (node_found && node_found->dma_buf) {
		get_dma_buf(node_found->dma_buf);
		params.fd = dma_buf_fd(node_found->dma_buf, O_CLOEXEC);
		if (params.fd < 0) {
			LOGE("dma buf to fd failed");
			ret = -EINVAL;
			dma_buf_put(node_found->dma_buf);
		} else if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
			LOGE("copy_to_user failed");
			ret = -EFAULT;
		}
	} else {
		LOGE("can't find the memory 0x%llx, 0x%x, node_found:%p",
			memdesc.gpuaddr, memdesc.size, node_found);
		ret = -EINVAL;
	}
	mutex_unlock(&priv->lock);

out:
	return ret;
}
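/*
 * Doorbell submission for the alloc-list issueib flavor: IB addresses are
 * rebased onto the backend memdescs, and a GSL_IBDESC_PROFILING_BUFFER
 * allocation, if present, is forwarded as the user profiling buffer with
 * CMDBATCH_PROFILING set.
 */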
static int hgsl_db_issueib_with_alloc_list(struct hgsl_priv *priv,
	struct hgsl_ioctl_issueib_with_alloc_list_params *param,
	struct gsl_command_buffer_object_t *ib,
	struct gsl_memory_object_t *allocations,
	struct gsl_memdesc_t *be_descs,
	uint64_t *be_offsets,
	uint32_t *timestamp)
{
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_context *ctxt = hgsl_get_context(hgsl, param->ctxthandle);
	int ret = 0;
	struct hgsl_fw_ib_desc *ib_descs = NULL;
	uint32_t gmu_flags = CMDBATCH_NOTIFY;
	uint32_t i;
	uint64_t user_profile_gpuaddr = 0;

	if (!hgsl_ctxt_use_dbq(ctxt)) {
		ret = -EPERM;
		goto out;
	}

	ib_descs = hgsl_malloc(sizeof(*ib_descs) * param->num_ibs);
	if (ib_descs == NULL) {
		LOGE("Out of memory");
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < param->num_ibs; i++) {
		ib_descs[i].addr = be_descs[i].gpuaddr + ib[i].offset + be_offsets[i];
		ib_descs[i].sz = ib[i].sizedwords << 2;
	}

	for (i = 0; i < param->num_allocations; i++) {
		if (allocations[i].flags & GSL_IBDESC_PROFILING_BUFFER) {
			user_profile_gpuaddr =
				be_descs[i + param->num_ibs].gpuaddr +
				allocations[i].offset +
				be_offsets[i + param->num_ibs];
			gmu_flags |= CMDBATCH_PROFILING;
			break;
		}
	}

	ret = hgsl_db_issue_cmd(priv, ctxt, param->num_ibs, gmu_flags,
			timestamp, ib_descs, user_profile_gpuaddr);
out:
	hgsl_put_context(ctxt);
	hgsl_free(ib_descs);
	return ret;
}
static int hgsl_db_issueib(struct hgsl_priv *priv,
	struct hgsl_ioctl_issueib_params *param,
	struct hgsl_ibdesc *ibs, uint32_t *timestamp)
{
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_context *ctxt = hgsl_get_context(hgsl, param->ctxthandle);
	int ret = 0;
	struct hgsl_fw_ib_desc *ib_descs = NULL;
	uint32_t gmu_flags = CMDBATCH_NOTIFY;
	uint32_t i;
	uint64_t user_profile_gpuaddr = 0;

	if (!hgsl_ctxt_use_dbq(ctxt)) {
		ret = -EPERM;
		goto out;
	}

	ib_descs = hgsl_malloc(sizeof(*ib_descs) * param->num_ibs);
	if (ib_descs == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < param->num_ibs; i++) {
		ib_descs[i].addr = ibs[i].gpuaddr;
		ib_descs[i].sz = ibs[i].sizedwords << 2;
	}

	ret = hgsl_db_issue_cmd(priv, ctxt, param->num_ibs, gmu_flags,
			timestamp, ib_descs, user_profile_gpuaddr);
out:
	hgsl_put_context(ctxt);
	hgsl_free(ib_descs);
	return ret;
}
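/*
 * issueib ioctl: copies the IB descriptors in and submits through the
 * doorbell path when the context supports it; otherwise (or when a remote
 * channel is requested) the submission is routed to the hypervisor.
 */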
static int hgsl_ioctl_issueib(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_issueib_params params;
	int ret = 0;
	struct hgsl_ibdesc *ibs = NULL;
	size_t ib_size = 0;
	uint32_t ts = 0;
	bool remote_issueib = false;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	if (params.num_ibs == 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = hgsl_check_context_owner(priv, params.ctxthandle);
	if (ret) {
		LOGE("Invalid context id %d", params.ctxthandle);
		goto out;
	}

	if (params.channel_id > 0) {
		remote_issueib = true;
	} else {
		ib_size = params.num_ibs * sizeof(struct hgsl_ibdesc);
		ibs = hgsl_malloc(ib_size);
		if (ibs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		if (copy_from_user(ibs, USRPTR(params.ibs), ib_size)) {
			ret = -EFAULT;
			goto out;
		}

		ts = params.timestamp;
		ret = hgsl_db_issueib(priv, &params, ibs, &ts);
		if (!ret) {
			params.rval = GSL_SUCCESS;
			params.timestamp = ts;
		} else if (ret == -EPERM) {
			remote_issueib = true;
		}
	}

	if (remote_issueib)
		ret = hgsl_hyp_issueib(&priv->hyp_priv, &params, ibs);

	if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
		LOGE("failed to copy param to user");
		ret = -EFAULT;
		goto out;
	}

out:
	hgsl_free(ibs);
	return ret;
}
static int hgsl_ioctl_issueib_with_alloc_list(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_issueib_with_alloc_list_params params;
	int ret = 0;
	struct gsl_command_buffer_object_t *ibs = NULL;
	struct gsl_memory_object_t *allocations = NULL;
	size_t ib_size = 0;
	size_t allocation_size = 0;
	size_t be_data_size = 0;
	struct gsl_memdesc_t *be_descs = NULL;
	uint64_t *be_offsets = NULL;
	uint32_t ts = 0;
	bool remote_issueib = false;

	if (copy_from_user(&params, USRPTR(arg), sizeof(params))) {
		LOGE("failed to copy params from user");
		ret = -EFAULT;
		goto out;
	}

	if (params.num_ibs == 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = hgsl_check_context_owner(priv, params.ctxthandle);
	if (ret) {
		LOGE("Invalid context id %d", params.ctxthandle);
		goto out;
	}

	if (params.channel_id > 0) {
		remote_issueib = true;
	} else {
		ib_size = params.num_ibs * sizeof(struct gsl_command_buffer_object_t);
		ibs = hgsl_malloc(ib_size);
		if (ibs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		if (copy_from_user(ibs, USRPTR(params.ibs), ib_size)) {
			ret = -EFAULT;
			goto out;
		}

		if (params.num_allocations != 0) {
			allocation_size = params.num_allocations *
					sizeof(struct gsl_memory_object_t);
			allocations = hgsl_malloc(allocation_size);
			if (allocations == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(allocations, USRPTR(params.allocations),
					allocation_size)) {
				ret = -EFAULT;
				goto out;
			}
		}

		if (params.num_ibs > UINT_MAX - params.num_allocations) {
			ret = -ENOMEM;
			LOGE("Too many ibs or allocations: num_ibs = %u, num_allocations = %u",
				params.num_ibs, params.num_allocations);
			goto out;
		}
		be_data_size = (params.num_ibs + params.num_allocations) *
				(sizeof(struct gsl_memdesc_t) + sizeof(uint64_t));
		be_descs = (struct gsl_memdesc_t *)hgsl_malloc(be_data_size);
		if (be_descs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		be_offsets = (uint64_t *)&be_descs[params.num_ibs +
				params.num_allocations];
		if (copy_from_user(be_descs, USRPTR(params.be_data), be_data_size)) {
			ret = -EFAULT;
			goto out;
		}

		ts = params.timestamp;
		ret = hgsl_db_issueib_with_alloc_list(priv, &params, ibs,
				allocations, be_descs, be_offsets, &ts);
		if (!ret) {
			params.rval = GSL_SUCCESS;
			params.timestamp = ts;
		} else if (ret == -EPERM) {
			remote_issueib = true;
		}
	}

	if (remote_issueib)
		ret = hgsl_hyp_issueib_with_alloc_list(&priv->hyp_priv,
				&params, ibs, allocations, be_descs, be_offsets);

	if (copy_to_user(USRPTR(arg), &params, sizeof(params))) {
		LOGE("failed to copy param to user");
		ret = -EFAULT;
		goto out;
	}

out:
	hgsl_free(ibs);
	hgsl_free(allocations);
	hgsl_free(be_descs);
	return ret;
}
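/*
 * wait_timestamp ioctl: answer from the shadow timestamp when possible,
 * and fall back to a hypervisor-side wait when the context has no usable
 * shadow or doorbell state.
 */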
static int hgsl_ioctl_wait_timestamp(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_wait_ts_info param;
	int ret;
	bool expired;
	bool remote_wait = false;
	struct hgsl_context *ctxt;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		return -EFAULT;
	}

	ctxt = hgsl_get_context_owner(priv, param.context_id);
	if (!ctxt) {
		LOGE("Invalid context id %d", param.context_id);
		return -EINVAL;
	}

	if (param.channel_id) {
		remote_wait = true;
	} else {
		ret = hgsl_check_shadow_timestamp(ctxt,
				GSL_TIMESTAMP_RETIRED, param.timestamp,
				&expired);
		if (ret)
			remote_wait = true;
		else if (!expired) {
			ret = hgsl_wait_timestamp(hgsl, &param);
			if (ret == -EPERM)
				remote_wait = true;
		}
	}
	hgsl_put_context(ctxt);

	if (remote_wait) {
		/* dbq or shadow timestamp is not enabled */
		ret = hgsl_hyp_wait_timestamp(&priv->hyp_priv, &param);
		if (ret == -EINTR) {
			if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
				LOGE("failed to copy param to user");
				return -EFAULT;
			}
		}
	}

	return ret;
}
static int hgsl_ioctl_read_timestamp(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_read_ts_params param;
	int ret;
	struct hgsl_context *ctxt;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		return -EFAULT;
	}

	ctxt = hgsl_get_context_owner(priv, param.ctxthandle);
	if (!ctxt) {
		LOGE("Invalid context id %d", param.ctxthandle);
		return -EINVAL;
	}

	ret = hgsl_read_shadow_timestamp(ctxt,
			param.type, &param.timestamp);
	hgsl_put_context(ctxt);

	if (ret)
		ret = hgsl_hyp_read_timestamp(&priv->hyp_priv, &param);

	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
			LOGE("failed to copy param to user");
			return -EFAULT;
		}
	}
	return ret;
}
static int hgsl_ioctl_check_timestamp(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_check_ts_params param;
	int ret;
	bool expired;
	struct hgsl_context *ctxt;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		return -EFAULT;
	}

	ctxt = hgsl_get_context_owner(priv, param.ctxthandle);
	if (!ctxt) {
		LOGE("Invalid context id %d", param.ctxthandle);
		return -EINVAL;
	}

	ret = hgsl_check_shadow_timestamp(ctxt, param.type,
			param.timestamp, &expired);
	if (ret)
		param.rval = -1;
	else
		param.rval = expired ? 1 : 0;
	hgsl_put_context(ctxt);

	if (ret)
		ret = hgsl_hyp_check_timestamp(&priv->hyp_priv, &param);

	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
			LOGE("failed to copy param to user");
			return -EFAULT;
		}
	}
	return ret;
}
static int hgsl_ioctl_get_system_time(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	uint64_t param;
	int ret = 0;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		return -EFAULT;
	}

	ret = hgsl_hyp_get_system_time(&priv->hyp_priv, &param);
	if (!ret) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
			LOGE("failed to copy param to user");
			return -EFAULT;
		}
	}

	return ret;
}
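/*
 * Wait on multiple sync objects via the hypervisor and copy the per-object
 * status array back to user space. num_syncobjs is bounded before the
 * buffers are sized, which avoids multiplication overflow.
 */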
static int hgsl_ioctl_syncobj_wait_multiple(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_syncobj_wait_multiple_params param;
	int ret = 0;
	uint64_t *rpc_syncobj = NULL;
	int32_t *status = NULL;
	size_t rpc_syncobj_size = 0;
	size_t status_size = 0;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}

	if ((param.num_syncobjs == 0) ||
		(param.num_syncobjs > (SIZE_MAX / sizeof(uint64_t))) ||
		(param.num_syncobjs > (SIZE_MAX / sizeof(int32_t)))) {
		LOGE("invalid num_syncobjs %zu", param.num_syncobjs);
		ret = -EINVAL;
		goto out;
	}

	rpc_syncobj_size = sizeof(uint64_t) * param.num_syncobjs;
	rpc_syncobj = (uint64_t *)hgsl_malloc(rpc_syncobj_size);
	if (rpc_syncobj == NULL) {
		LOGE("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}
	if (copy_from_user(rpc_syncobj, USRPTR(param.rpc_syncobj),
			rpc_syncobj_size)) {
		LOGE("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}

	status_size = sizeof(int32_t) * param.num_syncobjs;
	status = (int32_t *)hgsl_malloc(status_size);
	if (status == NULL) {
		LOGE("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}
	memset(status, 0, status_size);

	ret = hgsl_hyp_syncobj_wait_multiple(&priv->hyp_priv, rpc_syncobj,
			param.num_syncobjs, param.timeout_ms, status, &param.result);
	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
			ret = -EFAULT;
			goto out;
		}
		if (copy_to_user(USRPTR(param.status), status, status_size)) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	hgsl_free(rpc_syncobj);
	hgsl_free(status);
	return ret;
}
static int hgsl_ioctl_perfcounter_select(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_perfcounter_select_params param;
	int ret = 0;
	uint32_t *groups = NULL;
	uint32_t *counter_ids = NULL;
	uint32_t *counter_val_regs = NULL;
	uint32_t *counter_val_hi_regs = NULL;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}
	if ((param.num_counters <= 0) ||
		(param.num_counters > (SIZE_MAX / (sizeof(uint32_t) * 4)))) {
		LOGE("invalid num_counters %zu", param.num_counters);
		ret = -EINVAL;
		goto out;
	}
	groups = (uint32_t *)hgsl_malloc(
		sizeof(uint32_t) * 4 * param.num_counters);
	if (groups == NULL) {
		LOGE("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}
	counter_ids = groups + param.num_counters;
	counter_val_regs = counter_ids + param.num_counters;
	counter_val_hi_regs = counter_val_regs + param.num_counters;
	if (copy_from_user(groups, USRPTR(param.groups),
			sizeof(uint32_t) * param.num_counters)) {
		LOGE("failed to copy groups from user");
		ret = -EFAULT;
		goto out;
	}
	if (copy_from_user(counter_ids, USRPTR(param.counter_ids),
			sizeof(uint32_t) * param.num_counters)) {
		LOGE("failed to copy counter_ids from user");
		ret = -EFAULT;
		goto out;
	}
	ret = hgsl_hyp_perfcounter_select(&priv->hyp_priv, &param, groups,
		counter_ids, counter_val_regs, counter_val_hi_regs);
	if (!ret) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
			ret = -EFAULT;
			goto out;
		}
		if (copy_to_user(USRPTR(param.counter_val_regs),
				counter_val_regs,
				sizeof(uint32_t) * param.num_counters)) {
			ret = -EFAULT;
			goto out;
		}
		if (param.counter_val_hi_regs) {
			if (copy_to_user(USRPTR(param.counter_val_hi_regs),
					counter_val_hi_regs,
					sizeof(uint32_t) * param.num_counters)) {
				ret = -EFAULT;
				goto out;
			}
		}
	}
out:
	hgsl_free(groups);
	return ret;
}
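
/*
 * Release perfcounters reserved by a previous select call. Uses the same
 * single-allocation layout as the select path, here with two arrays
 * (groups and counter_ids).
 */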
static int hgsl_ioctl_perfcounter_deselect(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_perfcounter_deselect_params param;
	int ret = 0;
	uint32_t *groups = NULL;
	uint32_t *counter_ids = NULL;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}
	if ((param.num_counters <= 0) ||
		(param.num_counters > (SIZE_MAX / (sizeof(uint32_t) * 2)))) {
		LOGE("invalid num_counters %zu", param.num_counters);
		ret = -EINVAL;
		goto out;
	}
	groups = (uint32_t *)hgsl_malloc(
		sizeof(uint32_t) * 2 * param.num_counters);
	if (groups == NULL) {
		LOGE("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}
	counter_ids = groups + param.num_counters;
	if (copy_from_user(groups, USRPTR(param.groups),
			sizeof(uint32_t) * param.num_counters)) {
		LOGE("failed to copy groups from user");
		ret = -EFAULT;
		goto out;
	}
	if (copy_from_user(counter_ids, USRPTR(param.counter_ids),
			sizeof(uint32_t) * param.num_counters)) {
		LOGE("failed to copy counter_ids from user");
		ret = -EFAULT;
		goto out;
	}
	ret = hgsl_hyp_perfcounter_deselect(&priv->hyp_priv,
		&param, groups, counter_ids);
out:
	hgsl_free(groups);
	return ret;
}

static int hgsl_ioctl_perfcounter_query_selection(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_perfcounter_query_selections_params param;
	int ret = 0;
	int32_t *selections = NULL;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}
	if ((param.num_counters <= 0) ||
		(param.num_counters > (SIZE_MAX / sizeof(int32_t)))) {
		LOGE("invalid num_counters %zu", param.num_counters);
		ret = -EINVAL;
		goto out;
	}
	selections = (int32_t *)hgsl_malloc(
		sizeof(int32_t) * param.num_counters);
	if (selections == NULL) {
		LOGE("failed to allocate memory");
		ret = -ENOMEM;
		goto out;
	}
	memset(selections, 0, sizeof(int32_t) * param.num_counters);
	ret = hgsl_hyp_perfcounter_query_selections(&priv->hyp_priv,
		&param, selections);
	if (ret)
		goto out;
	if (copy_to_user(USRPTR(arg), &param, sizeof(param))) {
		ret = -EFAULT;
		goto out;
	}
	if (param.selections != 0) {
		if (copy_to_user(USRPTR(param.selections), selections,
				sizeof(int32_t) * param.num_counters)) {
			ret = -EFAULT;
			goto out;
		}
	}
out:
	hgsl_free(selections);
	return ret;
}
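
/* Read one perfcounter value through the hypervisor backend. */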
static int hgsl_ioctl_perfcounter_read(struct file *filep, unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_ioctl_perfcounter_read_params param;
	int ret = 0;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		LOGE("failed to copy param from user");
		return -EFAULT;
	}
	ret = hgsl_hyp_perfcounter_read(&priv->hyp_priv, &param);
	if (ret == 0) {
		if (copy_to_user(USRPTR(arg), &param, sizeof(param)))
			ret = -EFAULT;
	}
	return ret;
}
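
/*
 * A single hgsl_priv instance is shared by all open file descriptors of a
 * process: an open from a pid already on the active list only bumps
 * open_count; otherwise a new instance is allocated and registered with
 * the hypervisor backend.
 */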
static int hgsl_open(struct inode *inodep, struct file *filep)
{
	struct hgsl_priv *priv = NULL;
	struct qcom_hgsl *hgsl = container_of(inodep->i_cdev,
			struct qcom_hgsl, cdev);
	struct pid *pid = task_tgid(current);
	struct task_struct *task = pid_task(pid, PIDTYPE_PID);
	pid_t pid_nr;
	int ret = 0;

	if (!task)
		return -EINVAL;

	pid_nr = task_pid_nr(task);
	mutex_lock(&hgsl->mutex);
	list_for_each_entry(priv, &hgsl->active_list, node) {
		if (priv->pid == pid_nr) {
			priv->open_count++;
			goto out;
		}
	}
	priv = hgsl_zalloc(sizeof(*priv));
	if (!priv) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&priv->mem_mapped);
	INIT_LIST_HEAD(&priv->mem_allocated);
	mutex_init(&priv->lock);
	priv->pid = pid_nr;
	ret = hgsl_hyp_init(&priv->hyp_priv, hgsl->dev,
			priv->pid, task->comm);
	if (ret != 0)
		goto out;
	priv->dev = hgsl;
	priv->open_count = 1;
	list_add(&priv->node, &hgsl->active_list);
	hgsl_sysfs_client_init(priv);
	hgsl_debugfs_client_init(priv);
out:
	if (ret != 0)
		kfree(priv);
	else
		filep->private_data = priv;
	mutex_unlock(&hgsl->mutex);
	return ret;
}
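
/*
 * Tear down all memory still mapped or allocated by a client. The backend
 * is notified first; -ETIMEDOUT is propagated so the caller can retry the
 * cleanup later.
 */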
static int hgsl_cleanup(struct hgsl_priv *priv)
{
	struct hgsl_mem_node *node_found = NULL;
	struct hgsl_mem_node *tmp = NULL;
	int ret;
	bool need_notify = (!list_empty(&priv->mem_mapped) ||
			!list_empty(&priv->mem_allocated));
	struct hgsl_hab_channel_t *hab_channel = NULL;

	if (need_notify) {
		ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
		if (ret)
			LOGE("Failed to get channel %d", ret);
		ret = hgsl_hyp_notify_cleanup(hab_channel, HGSL_CLEANUP_WAIT_SLICE_IN_MS);
		if (ret == -ETIMEDOUT) {
			hgsl_hyp_channel_pool_put(hab_channel);
			return ret;
		}
	}
	mutex_lock(&priv->lock);
	if ((hab_channel == NULL) &&
		(!list_empty(&priv->mem_mapped) || !list_empty(&priv->mem_allocated))) {
		ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
		if (ret)
			LOGE("Failed to get channel %d", ret);
	}
	list_for_each_entry_safe(node_found, tmp, &priv->mem_mapped, node) {
		hgsl_put_sgt(node_found, false);
		ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
		if (ret)
			LOGE("Failed to clean mapped buffer %u, 0x%llx, ret %d",
				node_found->export_id, node_found->memdesc.gpuaddr, ret);
		else
			hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64));
		list_del(&node_found->node);
		hgsl_free(node_found);
	}
	list_for_each_entry_safe(node_found, tmp, &priv->mem_allocated, node) {
		ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
		if (ret)
			LOGE("Failed to clean mapped buffer %u, 0x%llx, ret %d",
				node_found->export_id, node_found->memdesc.gpuaddr, ret);
		list_del(&node_found->node);
		hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64));
		hgsl_sharedmem_free(node_found);
	}
	mutex_unlock(&priv->lock);
	hgsl_hyp_channel_pool_put(hab_channel);
	return 0;
}

static int _hgsl_release(struct hgsl_priv *priv)
{
	struct qcom_hgsl *hgsl = priv->dev;
	uint32_t i;
	int ret;

	read_lock(&hgsl->ctxt_lock);
	for (i = 0; i < HGSL_CONTEXT_NUM; i++) {
		if ((hgsl->contexts != NULL) &&
			(hgsl->contexts[i] != NULL) &&
			(priv == hgsl->contexts[i]->priv)) {
			/* Drop the lock across context destroy */
			read_unlock(&hgsl->ctxt_lock);
			hgsl_ctxt_destroy(priv, NULL, i, NULL, false);
			read_lock(&hgsl->ctxt_lock);
		}
	}
	read_unlock(&hgsl->ctxt_lock);
	hgsl_isync_fini(priv);
	ret = hgsl_cleanup(priv);
	if (ret)
		return ret;
	hgsl_hyp_close(&priv->hyp_priv);
	hgsl_free(priv);
	return 0;
}
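
/*
 * Deferred release path: drain release_list, destroying each client in
 * turn. A client whose cleanup timed out is put back on the list to be
 * retried.
 */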
static void hgsl_release_worker(struct work_struct *work)
{
	struct qcom_hgsl *hgsl =
		container_of(work, struct qcom_hgsl, release_work);
	struct hgsl_priv *priv = NULL;
	int ret;

	while (true) {
		mutex_lock(&hgsl->mutex);
		if (!list_empty(&hgsl->release_list)) {
			priv = container_of(hgsl->release_list.next,
					struct hgsl_priv, node);
			list_del(&priv->node);
		} else {
			priv = NULL;
		}
		mutex_unlock(&hgsl->mutex);
		if (!priv)
			break;
		ret = _hgsl_release(priv);
		if (ret == -ETIMEDOUT) {
			mutex_lock(&hgsl->mutex);
			list_add_tail(&priv->node, &hgsl->release_list);
			mutex_unlock(&hgsl->mutex);
		}
	}
}

static int hgsl_init_release_wq(struct qcom_hgsl *hgsl)
{
	int ret = 0;

	hgsl->release_wq = alloc_workqueue("hgsl-release-wq", WQ_HIGHPRI, 0);
	if (!hgsl->release_wq) {
		/* alloc_workqueue() returns NULL on failure, not ERR_PTR() */
		dev_err(hgsl->dev, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_WORK(&hgsl->release_work, hgsl_release_worker);
	INIT_LIST_HEAD(&hgsl->release_list);
	mutex_init(&hgsl->mutex);
out:
	return ret;
}

static int hgsl_release(struct inode *inodep, struct file *filep)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;

	mutex_lock(&hgsl->mutex);
	if (priv->open_count < 1)
		WARN_ON(1);
	else if (--priv->open_count == 0) {
		list_move(&priv->node, &hgsl->release_list);
		hgsl_debugfs_client_release(priv);
		hgsl_sysfs_client_release(priv);
		queue_work(hgsl->release_wq, &hgsl->release_work);
	}
	mutex_unlock(&hgsl->mutex);
	return 0;
}
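
/*
 * Reading the device node returns a short human-readable report of the
 * GPU hardware version registers (or a note that the doorbell is closed).
 */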
static ssize_t hgsl_read(struct file *filep, char __user *buf, size_t count,
	loff_t *pos)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;
	struct platform_device *pdev = to_platform_device(hgsl->dev);
	uint32_t version = 0;
	uint32_t release = 0;
	char buff[100];
	int ret = 0;

	if (!hgsl->db_off) {
		if (hgsl->reg_ver.vaddr == NULL) {
			ret = hgsl_reg_map(pdev, IORESOURCE_HWINF, &hgsl->reg_ver);
			if (ret < 0) {
				dev_err(hgsl->dev, "Unable to map resource:%s\n",
					IORESOURCE_HWINF);
			}
		}
		if (hgsl->reg_ver.vaddr != NULL) {
			hgsl_reg_read(&hgsl->reg_ver, 0, &version);
			hgsl_reg_read(&hgsl->reg_ver, 4, &release);
			snprintf(buff, sizeof(buff),
				"gpu HW Version:%x HW Release:%x\n",
				version, release);
		} else {
			snprintf(buff, sizeof(buff), "Unable to read HW version\n");
		}
	} else {
		snprintf(buff, sizeof(buff), "Doorbell closed\n");
	}
	return simple_read_from_buffer(buf, count, pos,
			buff, strlen(buff) + 1);
}
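
/*
 * Create a hardware-sync fence fd on a context's timeline. Requires the
 * doorbell path to be enabled and the calling process to own the context.
 */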
static int hgsl_ioctl_hsync_fence_create(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct qcom_hgsl *hgsl = priv->dev;
	struct hgsl_hsync_fence_create param;
	struct hgsl_context *ctxt = NULL;
	int ret = 0;

	if (hgsl->db_off) {
		dev_err(hgsl->dev, "Doorbell not open\n");
		return -EPERM;
	}
	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		pr_err_ratelimited("failed to copy param from user");
		ret = -EFAULT;
		goto out;
	}
	ctxt = hgsl_get_context_owner(priv, param.context_id);
	if ((ctxt == NULL) || (ctxt->timeline == NULL)) {
		ret = -EINVAL;
		goto out;
	}
	param.fence_fd = hgsl_hsync_fence_create_fd(ctxt, param.timestamp);
	if (param.fence_fd < 0) {
		ret = param.fence_fd;
		goto out;
	}
	(void)copy_to_user(USRPTR(arg), &param, sizeof(param));
out:
	hgsl_put_context(ctxt);
	return ret;
}
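
/*
 * isync ioctls: software timelines tracked by the driver itself, as
 * opposed to the hsync fence above, which is backed by a context's
 * hardware timeline.
 */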
static int hgsl_ioctl_isync_timeline_create(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	uint32_t param = 0;
	int ret = 0;

	ret = hgsl_isync_timeline_create(priv, &param, HGSL_ISYNC_32BITS_TIMELINE, 0);
	if (ret == 0)
		(void)copy_to_user(USRPTR(arg), &param, sizeof(param));
	return ret;
}

static int hgsl_ioctl_isync_timeline_destroy(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	uint32_t param = 0;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		pr_err_ratelimited("failed to copy param from user");
		return -EFAULT;
	}
	return hgsl_isync_timeline_destroy(priv, param);
}

static int hgsl_ioctl_isync_fence_create(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_isync_create_fence param;
	int ret = 0;
	int fence = 0;
	bool ts_is_valid;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		pr_err_ratelimited("failed to copy param from user");
		return -EFAULT;
	}
	ts_is_valid = (param.padding == HGSL_ISYNC_FENCE_CREATE_USE_TS);
	ret = hgsl_isync_fence_create(priv, param.timeline_id, param.ts,
			ts_is_valid, &fence);
	if (ret == 0) {
		param.fence_id = fence;
		(void)copy_to_user(USRPTR(arg), &param, sizeof(param));
	}
	return ret;
}

static int hgsl_ioctl_isync_fence_signal(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_isync_signal_fence param;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		pr_err_ratelimited("failed to copy param from user");
		return -EFAULT;
	}
	return hgsl_isync_fence_signal(priv, param.timeline_id,
			param.fence_id);
}

static int hgsl_ioctl_isync_forward(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_isync_forward param;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param))) {
		pr_err_ratelimited("failed to copy param from user");
		return -EFAULT;
	}
	return hgsl_isync_forward(priv, param.timeline_id,
			(uint64_t)param.ts, true);
}

static int hgsl_ioctl_timeline_create(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_timeline_create param;
	int ret = 0;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param)))
		return -EFAULT;
	ret = hgsl_isync_timeline_create(priv, &param.timeline_id,
			HGSL_ISYNC_64BITS_TIMELINE, param.initial_ts);
	if (ret == 0)
		(void)copy_to_user(USRPTR(arg), &param, sizeof(param));
	return ret;
}
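
/*
 * timeline_signal and timeline_query walk a user-space array of
 * hgsl_timeline_val entries. timelines_size is the user-side stride, so
 * copy_struct_from_user() can consume older or newer struct layouts.
 */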
static int hgsl_ioctl_timeline_signal(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_timeline_signal param;
	int ret = 0;
	uint64_t timelines;
	uint32_t i;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param)))
		return -EFAULT;
	if (!param.timelines_size)
		param.timelines_size = sizeof(struct hgsl_timeline_val);
	timelines = param.timelines;
	for (i = 0; i < param.count; i++) {
		struct hgsl_timeline_val val;

		if (copy_struct_from_user(&val, sizeof(val),
				USRPTR(timelines), param.timelines_size))
			return -EFAULT;
		if (val.padding)
			return -EINVAL;
		ret = hgsl_isync_forward(priv, val.timeline_id, val.timepoint, false);
		if (ret)
			return ret;
		timelines += param.timelines_size;
	}
	return ret;
}

static int hgsl_ioctl_timeline_query(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_timeline_query param;
	int ret = 0;
	uint64_t timelines;
	uint32_t i;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param)))
		return -EFAULT;
	if (!param.timelines_size)
		param.timelines_size = sizeof(struct hgsl_timeline_val);
	timelines = param.timelines;
	for (i = 0; i < param.count; i++) {
		struct hgsl_timeline_val val;

		if (copy_struct_from_user(&val, sizeof(val),
				USRPTR(timelines), param.timelines_size))
			return -EFAULT;
		if (val.padding)
			return -EINVAL;
		ret = hgsl_isync_query(priv, val.timeline_id, &val.timepoint);
		if (ret)
			return ret;
		(void)copy_to_user(USRPTR(timelines), &val, sizeof(val));
		timelines += param.timelines_size;
	}
	return ret;
}

static int hgsl_ioctl_timeline_wait(struct file *filep,
	unsigned long arg)
{
	struct hgsl_priv *priv = filep->private_data;
	struct hgsl_timeline_wait param;

	if (copy_from_user(&param, USRPTR(arg), sizeof(param)))
		return -EFAULT;
	if (!param.timelines_size)
		param.timelines_size = sizeof(struct hgsl_timeline_val);
	return hgsl_isync_wait_multiple(priv, &param);
}
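
/*
 * Per-category ioctl dispatchers. Each one returns -ENOIOCTLCMD for
 * commands it does not own so that hgsl_ioctl() can try the next group.
 */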
static long hgsl_ioctl_misc(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_ISSUE_IB:
		ret = hgsl_ioctl_issueib(filep, arg);
		break;
	case HGSL_IOCTL_HYP_GENERIC_TRANSACTION:
		ret = hgsl_ioctl_hyp_generic_transaction(filep, arg);
		break;
	case HGSL_IOCTL_ISSUIB_WITH_ALLOC_LIST:
		ret = hgsl_ioctl_issueib_with_alloc_list(filep, arg);
		break;
	case HGSL_IOCTL_GET_SYSTEM_TIME:
		ret = hgsl_ioctl_get_system_time(filep, arg);
		break;
	case HGSL_IOCTL_SYNCOBJ_WAIT_MULTIPLE:
		ret = hgsl_ioctl_syncobj_wait_multiple(filep, arg);
		break;
	case HGSL_IOCTL_SET_METAINFO:
		ret = hgsl_ioctl_set_metainfo(filep, arg);
		break;
	case HGSL_IOCTL_HSYNC_FENCE_CREATE:
		ret = hgsl_ioctl_hsync_fence_create(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_shadowts(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_GET_SHADOWTS_MEM:
		ret = hgsl_ioctl_get_shadowts_mem(filep, arg);
		break;
	case HGSL_IOCTL_PUT_SHADOWTS_MEM:
		ret = hgsl_ioctl_put_shadowts_mem(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_ctxt(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_CTXT_CREATE:
		ret = hgsl_ioctl_ctxt_create(filep, arg);
		break;
	case HGSL_IOCTL_CTXT_DESTROY:
		ret = hgsl_ioctl_ctxt_destroy(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_timestamp(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_WAIT_TIMESTAMP:
		ret = hgsl_ioctl_wait_timestamp(filep, arg);
		break;
	case HGSL_IOCTL_READ_TIMESTAMP:
		ret = hgsl_ioctl_read_timestamp(filep, arg);
		break;
	case HGSL_IOCTL_CHECK_TIMESTAMP:
		ret = hgsl_ioctl_check_timestamp(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_mem(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_MEM_ALLOC:
		ret = hgsl_ioctl_mem_alloc(filep, arg);
		break;
	case HGSL_IOCTL_MEM_FREE:
		ret = hgsl_ioctl_mem_free(filep, arg);
		break;
	case HGSL_IOCTL_MEM_MAP_SMMU:
		ret = hgsl_ioctl_mem_map_smmu(filep, arg);
		break;
	case HGSL_IOCTL_MEM_UNMAP_SMMU:
		ret = hgsl_ioctl_mem_unmap_smmu(filep, arg);
		break;
	case HGSL_IOCTL_MEM_CACHE_OPERATION:
		ret = hgsl_ioctl_mem_cache_operation(filep, arg);
		break;
	case HGSL_IOCTL_MEM_GET_FD:
		ret = hgsl_ioctl_mem_get_fd(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_perfcounter(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_PERFCOUNTER_SELECT:
		ret = hgsl_ioctl_perfcounter_select(filep, arg);
		break;
	case HGSL_IOCTL_PERFCOUNTER_DESELECT:
		ret = hgsl_ioctl_perfcounter_deselect(filep, arg);
		break;
	case HGSL_IOCTL_PERFCOUNTER_QUERY_SELECTION:
		ret = hgsl_ioctl_perfcounter_query_selection(filep, arg);
		break;
	case HGSL_IOCTL_PERFCOUNTER_READ:
		ret = hgsl_ioctl_perfcounter_read(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_isync(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_ISYNC_TIMELINE_CREATE:
		ret = hgsl_ioctl_isync_timeline_create(filep, arg);
		break;
	case HGSL_IOCTL_ISYNC_TIMELINE_DESTROY:
		ret = hgsl_ioctl_isync_timeline_destroy(filep, arg);
		break;
	case HGSL_IOCTL_ISYNC_FENCE_CREATE:
		ret = hgsl_ioctl_isync_fence_create(filep, arg);
		break;
	case HGSL_IOCTL_ISYNC_FENCE_SIGNAL:
		ret = hgsl_ioctl_isync_fence_signal(filep, arg);
		break;
	case HGSL_IOCTL_ISYNC_FORWARD:
		ret = hgsl_ioctl_isync_forward(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl_timeline(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case HGSL_IOCTL_TIMELINE_CREATE:
		ret = hgsl_ioctl_timeline_create(filep, arg);
		break;
	case HGSL_IOCTL_TIMELINE_SIGNAL:
		ret = hgsl_ioctl_timeline_signal(filep, arg);
		break;
	case HGSL_IOCTL_TIMELINE_QUERY:
		ret = hgsl_ioctl_timeline_query(filep, arg);
		break;
	case HGSL_IOCTL_TIMELINE_WAIT:
		ret = hgsl_ioctl_timeline_wait(filep, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}
	return ret;
}

static long hgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	int ret;

	ret = hgsl_ioctl_misc(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_shadowts(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_ctxt(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_timestamp(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_mem(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_isync(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_perfcounter(filep, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		goto out;
	ret = hgsl_ioctl_timeline(filep, cmd, arg);
out:
	return ret;
}

static long hgsl_compat_ioctl(struct file *filep, unsigned int cmd,
	unsigned long arg)
{
	return hgsl_ioctl(filep, cmd, arg);
}

static const struct file_operations hgsl_fops = {
	.owner = THIS_MODULE,
	.open = hgsl_open,
	.release = hgsl_release,
	.read = hgsl_read,
	.unlocked_ioctl = hgsl_ioctl,
	.compat_ioctl = hgsl_compat_ioctl,
};

static int qcom_hgsl_register(struct platform_device *pdev,
	struct qcom_hgsl *hgsl_dev)
{
	int ret;

	ret = alloc_chrdev_region(&hgsl_dev->device_no, 0,
			HGSL_DEV_NUM,
			HGSL_DEVICE_NAME);
	if (ret < 0) {
		dev_err(&pdev->dev, "alloc_chrdev_region failed %d\n", ret);
		return ret;
	}
	hgsl_dev->driver_class = class_create(THIS_MODULE, HGSL_DEVICE_NAME);
	if (IS_ERR(hgsl_dev->driver_class)) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "class_create failed %d\n", ret);
		goto exit_unreg_chrdev_region;
	}
	hgsl_dev->class_dev = device_create(hgsl_dev->driver_class,
			NULL,
			hgsl_dev->device_no,
			hgsl_dev, HGSL_DEVICE_NAME);
	if (IS_ERR(hgsl_dev->class_dev)) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "class_device_create failed %d\n", ret);
		goto exit_destroy_class;
	}
	cdev_init(&hgsl_dev->cdev, &hgsl_fops);
	hgsl_dev->cdev.owner = THIS_MODULE;
	ret = cdev_add(&hgsl_dev->cdev,
			MKDEV(MAJOR(hgsl_dev->device_no), 0),
			1);
	if (ret < 0) {
		dev_err(&pdev->dev, "cdev_add failed %d\n", ret);
		goto exit_destroy_device;
	}
	ret = dma_coerce_mask_and_coherent(hgsl_dev->dev, DMA_BIT_MASK(64));
	if (ret)
		LOGW("Failed to set dma mask to 64 bits, ret = %d", ret);
	return 0;

exit_destroy_device:
	device_destroy(hgsl_dev->driver_class, hgsl_dev->device_no);
exit_destroy_class:
	class_destroy(hgsl_dev->driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(hgsl_dev->device_no, HGSL_DEV_NUM);
	return ret;
}

static void qcom_hgsl_deregister(struct platform_device *pdev)
{
	struct qcom_hgsl *hgsl_dev = platform_get_drvdata(pdev);

	cdev_del(&hgsl_dev->cdev);
	device_destroy(hgsl_dev->driver_class, hgsl_dev->device_no);
	class_destroy(hgsl_dev->driver_class);
	unregister_chrdev_region(hgsl_dev->device_no, HGSL_DEV_NUM);
}

static bool hgsl_is_db_off(struct platform_device *pdev)
{
	if (pdev == NULL)
		return true;

	return of_property_read_bool(pdev->dev.of_node, "db-off");
}

static int hgsl_reg_map(struct platform_device *pdev,
	char *res_name, struct reg *reg)
{
	struct resource *res;
	int ret = 0;

	if ((pdev == NULL) || (res_name == NULL) || (reg == NULL)) {
		ret = -EINVAL;
		goto exit;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			res_name);
	if (res == NULL) {
		dev_err(&pdev->dev, "get resource :%s failed\n",
			res_name);
		ret = -EINVAL;
		goto exit;
	}
	if (res->start == 0 || resource_size(res) == 0) {
		dev_err(&pdev->dev, "Register region %s is invalid\n",
			res_name);
		ret = -EINVAL;
		goto exit;
	}
	reg->paddr = res->start;
	reg->size = resource_size(res);
	if (devm_request_mem_region(&pdev->dev,
			reg->paddr, reg->size,
			res_name) == NULL) {
		dev_err(&pdev->dev, "request_mem_region for %s failed\n",
			res_name);
		ret = -ENODEV;
		goto exit;
	}
	reg->vaddr = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	if (reg->vaddr == NULL) {
		dev_err(&pdev->dev, "Unable to remap %s registers\n",
			res_name);
		ret = -ENODEV;
		goto exit;
	}
exit:
	return ret;
}
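
/*
 * PM callbacks: suspend is a no-op; resume re-arms the TCSR ts_retire
 * interrupts and kicks the worker to catch any timestamp retirement that
 * may have been missed while the VM was down.
 */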
static int hgsl_suspend(struct device *dev)
{
	/* Do nothing */
	return 0;
}

static int hgsl_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct qcom_hgsl *hgsl = platform_get_drvdata(pdev);
	struct hgsl_tcsr *tcsr = NULL;
	int tcsr_idx = 0;

	if (pm_suspend_target_state == PM_SUSPEND_MEM) {
		for (tcsr_idx = 0; tcsr_idx < HGSL_TCSR_NUM; tcsr_idx++) {
			tcsr = hgsl->tcsr[tcsr_idx][HGSL_TCSR_ROLE_RECEIVER];
			if (tcsr != NULL) {
				hgsl_tcsr_irq_enable(tcsr,
					GLB_DB_DEST_TS_RETIRE_IRQ_MASK, true);
			}
		}
		/*
		 * The GVM may submit work to the GMU just before going to
		 * suspend. In that case the GMU does not put it on the RB;
		 * when the GMU resumes (FW reload), it submits the work to
		 * the GPU and fires ts_retire towards the GVM. At that
		 * point the GVM is not up yet and may miss the interrupt,
		 * so check for any ts_retire by reading the shadow
		 * timestamp.
		 */
		if (hgsl->wq != NULL)
			queue_work(hgsl->wq, &hgsl->ts_retire_work);
	}
	return 0;
}
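
/*
 * Probe: register the character device, set up context tracking, the
 * release workqueue, doorbell queues and the isync timeline IDR, then
 * expose sysfs and debugfs entries.
 */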
static int qcom_hgsl_probe(struct platform_device *pdev)
{
	struct qcom_hgsl *hgsl_dev;
	int ret;
	int i;

	hgsl_dev = devm_kzalloc(&pdev->dev, sizeof(*hgsl_dev), GFP_KERNEL);
	if (!hgsl_dev)
		return -ENOMEM;

	hgsl_dev->dev = &pdev->dev;
	ret = qcom_hgsl_register(pdev, hgsl_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "qcom_hgsl_register failed, ret %d\n",
			ret);
		return ret;
	}
	ret = hgsl_init_context(hgsl_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "hgsl_init_context failed, ret %d\n",
			ret);
		goto exit_dereg;
	}
	INIT_LIST_HEAD(&hgsl_dev->active_list);
	ret = hgsl_init_release_wq(hgsl_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "hgsl_init_release_wq failed, ret %d\n",
			ret);
		goto exit_dereg;
	}
	hgsl_dev->db_off = hgsl_is_db_off(pdev);
	idr_init(&hgsl_dev->isync_timeline_idr);
	spin_lock_init(&hgsl_dev->isync_timeline_lock);
	for (i = 0; i < MAX_DB_QUEUE; i++) {
		mutex_init(&hgsl_dev->dbq[i].lock);
		hgsl_dev->dbq[i].state = DB_STATE_Q_UNINIT;
	}
	if (!hgsl_dev->db_off)
		hgsl_init_global_hyp_channel(hgsl_dev);
	hgsl_dev->default_iocoherency = of_property_read_bool(pdev->dev.of_node,
			"default_iocoherency");
	platform_set_drvdata(pdev, hgsl_dev);
	hgsl_sysfs_init(pdev);
	hgsl_debugfs_init(pdev);
	return 0;

exit_dereg:
	qcom_hgsl_deregister(pdev);
	return ret;
}

static int qcom_hgsl_remove(struct platform_device *pdev)
{
	struct qcom_hgsl *hgsl = platform_get_drvdata(pdev);
	struct hgsl_tcsr *tcsr_sender, *tcsr_receiver;
	int i;

	hgsl_debugfs_release(pdev);
	hgsl_sysfs_release(pdev);
	for (i = 0; i < HGSL_TCSR_NUM; i++) {
		tcsr_sender = hgsl->tcsr[i][HGSL_TCSR_ROLE_SENDER];
		tcsr_receiver = hgsl->tcsr[i][HGSL_TCSR_ROLE_RECEIVER];
		if (tcsr_sender) {
			hgsl_tcsr_disable(tcsr_sender);
			hgsl_tcsr_free(tcsr_sender);
		}
		if (tcsr_receiver) {
			hgsl_tcsr_disable(tcsr_receiver);
			hgsl_tcsr_free(tcsr_receiver);
		}
	}
	if (hgsl->wq) {
		flush_workqueue(hgsl->wq);
		destroy_workqueue(hgsl->wq);
		hgsl->wq = NULL;
	}
	kfree(hgsl->contexts);
	hgsl->contexts = NULL;
	memset(hgsl->tcsr, 0, sizeof(hgsl->tcsr));
	for (i = 0; i < MAX_DB_QUEUE; i++)
		if (hgsl->dbq[i].state == DB_STATE_Q_INIT_DONE)
			hgsl_reset_dbq(&hgsl->dbq[i]);
	idr_destroy(&hgsl->isync_timeline_idr);
	qcom_hgsl_deregister(pdev);
	return 0;
}

static const struct dev_pm_ops hgsl_pm_ops = {
	.suspend = hgsl_suspend,
	.resume = hgsl_resume,
};

static const struct of_device_id qcom_hgsl_of_match[] = {
	{ .compatible = "qcom,hgsl" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_hgsl_of_match);

static struct platform_driver qcom_hgsl_driver = {
	.probe = qcom_hgsl_probe,
	.remove = qcom_hgsl_remove,
	.driver = {
		.name = "qcom-hgsl",
		.of_match_table = qcom_hgsl_of_match,
		.pm = &hgsl_pm_ops,
	},
};

static int __init hgsl_init(void)
{
	int err;

	err = platform_driver_register(&qcom_hgsl_driver);
	if (err) {
		pr_err("Failed to register hgsl driver: %d\n", err);
		goto exit;
	}

#if IS_ENABLED(CONFIG_QCOM_HGSL_TCSR_SIGNAL)
	err = platform_driver_register(&hgsl_tcsr_driver);
	if (err) {
		pr_err("Failed to register hgsl tcsr driver: %d\n", err);
		platform_driver_unregister(&qcom_hgsl_driver);
	}
#endif
exit:
	return err;
}

static void __exit hgsl_exit(void)
{
	platform_driver_unregister(&qcom_hgsl_driver);
#if IS_ENABLED(CONFIG_QCOM_HGSL_TCSR_SIGNAL)
	platform_driver_unregister(&hgsl_tcsr_driver);
#endif
}

module_init(hgsl_init);
module_exit(hgsl_exit);

MODULE_IMPORT_NS(DMA_BUF);
MODULE_DESCRIPTION("QTI Hypervisor Graphics system driver");
MODULE_LICENSE("GPL");