adreno_a6xx_gmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/firmware.h>
#include <linux/interconnect.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/of_platform.h>
#include <linux/qcom-iommu-util.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/mailbox/qmp.h>
#include <soc/qcom/cmd-db.h>

#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_trace.h"
#include "kgsl_bus.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_util.h"

#define ARC_VOTE_GET_PRI(_v) ((_v) & 0xFF)
#define ARC_VOTE_GET_SEC(_v) (((_v) >> 8) & 0xFF)
#define ARC_VOTE_GET_VLVL(_v) (((_v) >> 16) & 0xFFFF)

#define ARC_VOTE_SET(pri, sec, vlvl) \
	((((vlvl) & 0xFFFF) << 16) | (((sec) & 0xFF) << 8) | ((pri) & 0xFF))
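
/*
 * Worked example (for illustration, not part of the upstream comments): an
 * ARC vote packs the voltage level into bits [31:16], the secondary rail
 * index into bits [15:8] and the primary rail index into bits [7:0]. So
 * ARC_VOTE_SET(1, 2, 0x80) evaluates to 0x00800201, and the GET_PRI/SEC/VLVL
 * accessors recover 1, 2 and 0x80 from that value.
 */
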
static struct gmu_vma_entry a6xx_gmu_vma_legacy[] = {
	[GMU_ITCM] = {
		.start = 0x00000,
		.size = SZ_16K
	},
	[GMU_ICACHE] = {
		.start = 0x04000,
		.size = (SZ_256K - SZ_16K),
		.next_va = 0x4000
	},
	[GMU_DTCM] = {
		.start = 0x40000,
		.size = SZ_16K
	},
	[GMU_DCACHE] = {
		.start = 0x44000,
		.size = (SZ_256K - SZ_16K),
		.next_va = 0x44000
	},
	[GMU_NONCACHED_KERNEL] = {
		.start = 0x60000000,
		.size = SZ_512M,
		.next_va = 0x60000000
	},
};

static struct gmu_vma_entry a6xx_gmu_vma[] = {
	[GMU_ITCM] = {
		.start = 0x00000000,
		.size = SZ_16K
	},
	[GMU_CACHE] = {
		.start = SZ_16K,
		.size = (SZ_16M - SZ_16K),
		.next_va = SZ_16K
	},
	[GMU_DTCM] = {
		.start = SZ_256M + SZ_16K,
		.size = SZ_16K
	},
	[GMU_DCACHE] = {
		.start = 0x0,
		.size = 0x0
	},
	[GMU_NONCACHED_KERNEL] = {
		.start = 0x60000000,
		.size = SZ_512M,
		.next_va = 0x60000000
	},
};

static void _regwrite(void __iomem *regbase, u32 offsetwords, u32 value)
{
	void __iomem *reg;

	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}

static void _regrmw(void __iomem *regbase, u32 offsetwords, u32 mask, u32 or)
{
	void __iomem *reg;
	u32 val;

	reg = regbase + (offsetwords << 2);
	val = __raw_readl(reg);
	/* Make sure the read is posted and all pending writes are done */
	mb();
	__raw_writel((val & ~mask) | or, reg);
}
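
/*
 * Note for clarity (added, not from the original file): offsetwords in the
 * helpers above is a dword offset into the mapped block, so the byte offset
 * is offsetwords << 2; for example _regwrite(base, 0x10, v) writes to byte
 * offset 0x40 from base.
 */
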
static ssize_t log_stream_enable_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, log_kobj);
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	gmu->log_stream_enable = val;

	return count;
}

static ssize_t log_stream_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, log_kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", gmu->log_stream_enable);
}

static ssize_t log_group_mask_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, log_kobj);
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	gmu->log_group_mask = val;

	return count;
}

static ssize_t log_group_mask_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, log_kobj);

	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->log_group_mask);
}

static struct kobj_attribute log_stream_enable_attr =
	__ATTR(log_stream_enable, 0644, log_stream_enable_show, log_stream_enable_store);

static struct kobj_attribute log_group_mask_attr =
	__ATTR(log_group_mask, 0644, log_group_mask_show, log_group_mask_store);

static struct attribute *log_attrs[] = {
	&log_stream_enable_attr.attr,
	&log_group_mask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(log);

static struct kobj_type log_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = log_groups,
};

static ssize_t stats_enable_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);
	bool val;
	int ret;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	gmu->stats_enable = val;

	return count;
}

static ssize_t stats_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", gmu->stats_enable);
}

static ssize_t stats_mask_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	gmu->stats_mask = val;

	return count;
}

static ssize_t stats_mask_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);

	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->stats_mask);
}

static ssize_t stats_interval_store(struct kobject *kobj,
	struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);
	u32 val;
	int ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	gmu->stats_interval = val;

	return count;
}

static ssize_t stats_interval_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct a6xx_gmu_device *gmu = container_of(kobj, struct a6xx_gmu_device, stats_kobj);

	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->stats_interval);
}

static struct kobj_attribute stats_enable_attr =
	__ATTR(stats_enable, 0644, stats_enable_show, stats_enable_store);

static struct kobj_attribute stats_mask_attr =
	__ATTR(stats_mask, 0644, stats_mask_show, stats_mask_store);

static struct kobj_attribute stats_interval_attr =
	__ATTR(stats_interval, 0644, stats_interval_show, stats_interval_store);

static struct attribute *stats_attrs[] = {
	&stats_enable_attr.attr,
	&stats_mask_attr.attr,
	&stats_interval_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(stats);

static struct kobj_type stats_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = stats_groups,
};

static int timed_poll_check_rscc(struct kgsl_device *device,
		unsigned int offset, unsigned int expected_ret,
		unsigned int timeout, unsigned int mask)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	u32 value;

	if (!adreno_is_a650_family(adreno_dev))
		return gmu_core_timed_poll_check(device,
				offset + RSCC_OFFSET_LEGACY,
				expected_ret, timeout, mask);

	return readl_poll_timeout(gmu->rscc_virt + (offset << 2), value,
		(value & mask) == expected_ret, 100, timeout * 1000);
}

struct a6xx_gmu_device *to_a6xx_gmu(struct adreno_device *adreno_dev)
{
	struct a6xx_device *a6xx_dev = container_of(adreno_dev,
			struct a6xx_device, adreno_dev);

	return &a6xx_dev->gmu;
}

struct adreno_device *a6xx_gmu_to_adreno(struct a6xx_gmu_device *gmu)
{
	struct a6xx_device *a6xx_dev =
		container_of(gmu, struct a6xx_device, gmu);

	return &a6xx_dev->adreno_dev;
}

#define RSC_CMD_OFFSET 2
#define PDC_CMD_OFFSET 4
#define PDC_ENABLE_REG_VALUE 0x80000001
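
/*
 * Clarifying note (added, for illustration): the RSC "hidden" TCS command
 * registers are spaced 2 registers apart and the PDC TCS command registers
 * 4 registers apart, so command slot N is addressed as
 * CMD0_<field> + N * {RSC,PDC}_CMD_OFFSET. For example,
 * PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2 below is the address register
 * of the third command in TCS1.
 */
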
void a6xx_load_rsc_ucode(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	void __iomem *rscc;

	if (adreno_is_a650_family(adreno_dev))
		rscc = gmu->rscc_virt;
	else
		rscc = kgsl_regmap_virt(&device->regmap, RSCC_OFFSET_LEGACY);

	/* Disable SDE clock gating */
	_regwrite(rscc, A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	_regwrite(rscc, A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
		0x80000000);
	_regwrite(rscc, A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2,
		0);
	_regwrite(rscc, A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	_regwrite(rscc, A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	_regwrite(rscc, A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	_regwrite(rscc, A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_dev)) {
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xEAAAE5A0);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xE1A1EBAB);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E0A581);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xECAC82E2);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020EDAD);
	} else {
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0, 0xA7A506A0);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xA1E6A6E7);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xA2E081E1);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xE9A982E2);
		_regwrite(rscc, A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020E8A8);
	}
}

int a6xx_load_pdc_ucode(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct resource *res_pdc, *res_cfg, *res_seq;
	unsigned int cfg_offset, seq_offset;
	void __iomem *cfg = NULL, *seq = NULL;
	const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
	u32 vrm_resource_addr = cmd_db_read_addr("vrm.soc");
	u32 xo_resource_addr = cmd_db_read_addr("xo.lvl");
	u32 cx_res_addr = cmd_db_read_addr("cx.lvl");
	u32 mx_res_addr = cmd_db_read_addr("mx.lvl");

	if (!xo_resource_addr) {
		dev_err(&gmu->pdev->dev,
			"Failed to get 'xo.lvl' addr from cmd_db\n");
		return -ENOENT;
	}

	if (!cx_res_addr) {
		dev_err(&gmu->pdev->dev,
			"Failed to get 'cx.lvl' addr from cmd_db\n");
		return -ENOENT;
	}

	if (!mx_res_addr) {
		dev_err(&gmu->pdev->dev,
			"Failed to get 'mx.lvl' addr from cmd_db\n");
		return -ENOENT;
	}

	/*
	 * Older A6x platforms specified PDC registers in the DT using a
	 * single base pointer that encompassed the entire PDC range. Current
	 * targets specify the individual GPU-owned PDC register blocks
	 * (sequence and config).
	 *
	 * This code handles both possibilities and generates individual
	 * pointers to the GPU PDC blocks, either as offsets from the single
	 * base, or as directly specified ranges.
	 *
	 * PDC programming has moved to AOP for newer A6x platforms.
	 * However registers to enable GPU PDC and set the sequence start
	 * address still need to be programmed.
	 */
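
	/*
	 * Added note (for illustration): in practice the two DT layouts show
	 * up as different named MEM resources on the GMU platform device, a
	 * single "kgsl_gmu_pdc_reg" region for the legacy layout, or separate
	 * "kgsl_gmu_pdc_cfg" and "kgsl_gmu_pdc_seq" regions on current
	 * targets, as queried just below.
	 */
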
	/* Offsets from the base PDC (if no PDC subsections in the DTSI) */
	if (adreno_is_a640v2(adreno_dev)) {
		cfg_offset = 0x90000;
		seq_offset = 0x290000;
	} else {
		cfg_offset = 0x80000;
		seq_offset = 0x280000;
	}

	/* Get pointers to each of the possible PDC resources */
	res_pdc = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
			"kgsl_gmu_pdc_reg");
	res_cfg = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
			"kgsl_gmu_pdc_cfg");

	/*
	 * Map the starting address for pdc_cfg programming. If the pdc_cfg
	 * resource is not available use an offset from the base PDC resource.
	 */
	if (gmu->pdc_cfg_base == NULL) {
		if (res_cfg)
			gmu->pdc_cfg_base = devm_ioremap(&gmu->pdev->dev,
				res_cfg->start, resource_size(res_cfg));
		else if (res_pdc)
			gmu->pdc_cfg_base = devm_ioremap(&gmu->pdev->dev,
				res_pdc->start + cfg_offset, 0x10000);

		if (!gmu->pdc_cfg_base) {
			dev_err(&gmu->pdev->dev, "Failed to map PDC CFG\n");
			return -ENODEV;
		}
	}

	cfg = gmu->pdc_cfg_base;

	/* PDC is programmed in AOP for newer platforms */
	if (a6xx_core->pdc_in_aop)
		goto done;

	/*
	 * Map the starting address for pdc_seq programming. If the pdc_seq
	 * resource is not available use an offset from the base PDC resource.
	 */
	if (gmu->pdc_seq_base == NULL) {
		res_seq = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
				"kgsl_gmu_pdc_seq");

		if (res_seq)
			gmu->pdc_seq_base = devm_ioremap(&gmu->pdev->dev,
				res_seq->start, resource_size(res_seq));
		else if (res_pdc)
			gmu->pdc_seq_base = devm_ioremap(&gmu->pdev->dev,
				res_pdc->start + seq_offset, 0x10000);

		if (!gmu->pdc_seq_base) {
			dev_err(&gmu->pdev->dev, "Failed to map PDC SEQ\n");
			return -ENODEV;
		}
	}

	seq = gmu->pdc_seq_base;

	/* Load PDC sequencer uCode for power up and power down sequence */
	_regwrite(seq, PDC_GPU_SEQ_MEM_0, 0xFEBEA1E1);
	_regwrite(seq, PDC_GPU_SEQ_MEM_0 + 1, 0xA5A4A3A2);
	_regwrite(seq, PDC_GPU_SEQ_MEM_0 + 2, 0x8382A6E0);
	_regwrite(seq, PDC_GPU_SEQ_MEM_0 + 3, 0xBCE3E284);
	_regwrite(seq, PDC_GPU_SEQ_MEM_0 + 4, 0x002081FC);

	/* Set TCS commands used by PDC sequence for low power modes */
	_regwrite(cfg, PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	_regwrite(cfg, PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(cfg, PDC_GPU_TCS1_CONTROL, 0);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR, mx_res_addr);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA, 1);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET, cx_res_addr);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET, 0x0);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 2,
			xo_resource_addr);
	_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x0);

	if (vrm_resource_addr && adreno_is_a620(adreno_dev)) {
		_regwrite(cfg, PDC_GPU_TCS1_CMD0_MSGID + PDC_CMD_OFFSET * 3,
				0x10108);
		_regwrite(cfg, PDC_GPU_TCS1_CMD0_ADDR + PDC_CMD_OFFSET * 3,
				vrm_resource_addr + 0x4);
		_regwrite(cfg, PDC_GPU_TCS1_CMD0_DATA + PDC_CMD_OFFSET * 3,
				0x0);
	}

	_regwrite(cfg, PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	_regwrite(cfg, PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	_regwrite(cfg, PDC_GPU_TCS3_CONTROL, 0);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR, mx_res_addr);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA, 2);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, cx_res_addr);

	if (adreno_is_a618(adreno_dev) || adreno_is_a619(adreno_dev) ||
			adreno_is_a650_family(adreno_dev))
		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x2);
	else
		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);

	_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 2, 0x10108);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 2,
			xo_resource_addr);
	_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 2, 0x3);

	if (vrm_resource_addr && adreno_is_a620(adreno_dev)) {
		_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET * 3,
				0x10108);
		_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET * 3,
				vrm_resource_addr + 0x4);
		_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET * 3,
				0x1);
	}

done:
	/* Setup GPU PDC */
	_regwrite(cfg, PDC_GPU_SEQ_START_ADDR, 0);
	_regwrite(cfg, PDC_GPU_ENABLE_PDC, PDC_ENABLE_REG_VALUE);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

	return 0;
}

/* GMU timeouts */
#define GMU_IDLE_TIMEOUT	100	/* ms */
#define GMU_START_TIMEOUT	100	/* ms */
#define GPU_START_TIMEOUT	100	/* ms */
#define GPU_RESET_TIMEOUT	1	/* ms */
#define GPU_RESET_TIMEOUT_US	10	/* us */

/*
 * The lowest 16 bits of this value are the number of XO clock cycles
 * for main hysteresis. This is the first hysteresis. Here we set it
 * to 0x1680 cycles, or 300 us. The highest 16 bits of this value are
 * the number of XO clock cycles for short hysteresis. This happens
 * after main hysteresis. Here we set it to 0xA cycles, or 0.5 us.
 */
#define A6X_GMU_LONG_IFPC_HYST FIELD_PREP(GENMASK(15, 0), 0x1680)
#define A6X_GMU_SHORT_IFPC_HYST FIELD_PREP(GENMASK(31, 16), 0xA)

/* Minimum IFPC timer (200usec) allowed to override default value */
#define A6X_GMU_LONG_IFPC_HYST_FLOOR FIELD_PREP(GENMASK(15, 0), 0x0F00)
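
/*
 * Worked example (for illustration, not part of the upstream comments): the
 * XO clock runs at 19.2 MHz, so 0x1680 = 5760 cycles is 5760 / 19.2 MHz =
 * 300 us, 0xA = 10 cycles is roughly 0.52 us, and the floor of 0x0F00 = 3840
 * cycles is 200 us. When IFPC is enabled below, the hysteresis register is
 * written as A6X_GMU_SHORT_IFPC_HYST | ifpc_hyst; if ifpc_hyst holds the
 * default long value 0x1680, that works out to 0x000A0000 | 0x1680 =
 * 0x000A1680.
 */
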
/*
 * a6xx_gmu_power_config() - Configure and enable GMU's low power mode
 * setting based on ADRENO feature flags.
 * @adreno_dev: Pointer to adreno device
 */
static void a6xx_gmu_power_config(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	/* Configure registers for idle setting. The setting is cumulative */

	/* Disable GMU WB/RB buffer and caches at boot */
	gmu_core_regwrite(device, A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_core_regwrite(device, A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_core_regwrite(device, A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_core_regwrite(device,
		A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9C40400);

	if (gmu->idle_level == GPU_HW_IFPC) {
		gmu_core_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			A6X_GMU_SHORT_IFPC_HYST | adreno_dev->ifpc_hyst);
		gmu_core_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
			IFPC_ENABLE_MASK, IFPC_ENABLE_MASK);
		gmu_core_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			A6X_GMU_SHORT_IFPC_HYST | adreno_dev->ifpc_hyst);
		gmu_core_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
			SPTP_ENABLE_MASK, SPTP_ENABLE_MASK);
	}

	/* Enable RPMh GPU client */
	gmu_core_regrmw(device, A6XX_GMU_RPMH_CTRL, RPMH_ENABLE_MASK,
		RPMH_ENABLE_MASK);
}

static void gmu_ao_sync_event(struct adreno_device *adreno_dev)
{
	unsigned long flags;
	u64 ticks;

	local_irq_save(flags);

	/* Read GMU always on register */
	ticks = a6xx_read_alwayson(adreno_dev);

	/* Trace the GMU time to create a mapping to ftrace time */
	trace_gmu_ao_sync(ticks);

	local_irq_restore(flags);
}

void a6xx_gmu_disable_gdsc(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC))
		regulator_set_mode(pwr->cx_gdsc, REGULATOR_MODE_IDLE);

	kgsl_pwrctrl_disable_cx_gdsc(device);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_CX_GDSC))
		regulator_set_mode(pwr->cx_gdsc, REGULATOR_MODE_NORMAL);
}

int a6xx_gmu_device_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	u32 val = 0x00000100;
	u32 mask = 0x000001FF;

	gmu_core_reset_trace_header(&gmu->trace);

	gmu_ao_sync_event(adreno_dev);

	/* Check for 0xBABEFACE on legacy targets */
	if (gmu->ver.core <= 0x20010004) {
		val = 0xBABEFACE;
		mask = 0xFFFFFFFF;
	}

	/* Bring GMU out of reset */
	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);

	/* Make sure the write is posted before moving ahead */
	wmb();

	if (gmu_core_timed_poll_check(device,
			A6XX_GMU_CM3_FW_INIT_RESULT,
			val, GMU_START_TIMEOUT, mask)) {
		dev_err(&gmu->pdev->dev, "GMU doesn't boot\n");
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * a6xx_gmu_hfi_start() - Write registers and start HFI.
 * @device: Pointer to KGSL device
 */
int a6xx_gmu_hfi_start(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	gmu_core_regwrite(device, A6XX_GMU_HFI_CTRL_INIT, 1);

	if (gmu_core_timed_poll_check(device,
			A6XX_GMU_HFI_CTRL_STATUS,
			BIT(0),
			GMU_START_TIMEOUT,
			BIT(0))) {
		dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}

	return 0;
}

int a6xx_rscc_wakeup_sequence(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct device *dev = &gmu->pdev->dev;
	int val;

	/* Skip wakeup sequence if we didn't do the sleep sequence */
	if (!test_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags))
		return 0;

	/* A660 has a replacement register */
	if (adreno_is_a662(adreno_dev) || adreno_is_a621(adreno_dev))
		gmu_core_regread(device, A662_GPU_CC_GX_DOMAIN_MISC3, &val);
	else if (adreno_is_a660(ADRENO_DEVICE(device)) ||
		adreno_is_a663(adreno_dev))
		gmu_core_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC3, &val);
	else
		gmu_core_regread(device, A6XX_GPU_CC_GX_DOMAIN_MISC, &val);

	if (!(val & 0x1))
		dev_info_ratelimited(&gmu->pdev->dev,
			"GMEM CLAMP IO not set while GFX rail off\n");

	/* RSC wake sequence */
	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));

	/* Write request before polling */
	wmb();

	if (gmu_core_timed_poll_check(device,
			A6XX_GMU_RSCC_CONTROL_ACK,
			BIT(1),
			GPU_START_TIMEOUT,
			BIT(1))) {
		dev_err(dev, "Failed to do GPU RSC power on\n");
		return -ETIMEDOUT;
	}

	if (timed_poll_check_rscc(device,
			A6XX_RSCC_SEQ_BUSY_DRV0,
			0,
			GPU_START_TIMEOUT,
			0xFFFFFFFF))
		goto error_rsc;

	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

	clear_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags);

	return 0;

error_rsc:
	dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
	return -ETIMEDOUT;
}

int a6xx_rscc_sleep_sequence(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	if (!test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags))
		return 0;

	if (test_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags))
		return 0;

	gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);

	/* Make sure M3 is in reset before going on */
	wmb();

	gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
		&gmu->log_wptr_retention);

	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 1);
	/* Make sure the request completes before continuing */
	wmb();

	ret = timed_poll_check_rscc(device,
		A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		BIT(16),
		GPU_START_TIMEOUT,
		BIT(16));
	if (ret) {
		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
		return -ETIMEDOUT;
	}

	gmu_core_regwrite(device, A6XX_GMU_RSCC_CONTROL_REQ, 0);

	if (adreno_dev->lm_enabled)
		gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 0);

	set_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags);

	return 0;
}

static struct kgsl_memdesc *find_gmu_memdesc(struct a6xx_gmu_device *gmu,
		u32 addr, u32 size)
{
	int i;

	for (i = 0; i < gmu->global_entries; i++) {
		struct kgsl_memdesc *md = &gmu->gmu_globals[i];

		if ((addr >= md->gmuaddr) &&
			(((addr + size) <= (md->gmuaddr + md->size))))
			return md;
	}

	return NULL;
}

static int find_vma_block(struct a6xx_gmu_device *gmu, u32 addr, u32 size)
{
	int i;

	for (i = 0; i < GMU_MEM_TYPE_MAX; i++) {
		struct gmu_vma_entry *vma = &gmu->vma[i];

		if ((addr >= vma->start) &&
			((addr + size) <= (vma->start + vma->size)))
			return i;
	}

	return -ENOENT;
}

#define MAX_GMUFW_SIZE	0x8000	/* in bytes */

static int _load_legacy_gmu_fw(struct kgsl_device *device,
	struct a6xx_gmu_device *gmu)
{
	const struct firmware *fw = gmu->fw_image;

	if (fw->size > MAX_GMUFW_SIZE)
		return -EINVAL;

	gmu_core_blkwrite(device, A6XX_GMU_CM3_ITCM_START, fw->data,
		fw->size);

	/* Proceed only after the FW is written */
	wmb();

	return 0;
}

static void load_tcm(struct adreno_device *adreno_dev, const u8 *src,
		u32 tcm_start, u32 base, const struct gmu_block_header *blk)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 tcm_offset = tcm_start + ((blk->addr - base) / sizeof(u32));
	void __iomem *addr = kgsl_regmap_virt(&device->regmap, tcm_offset);

	memcpy_toio(addr, src, blk->size);
}

int a6xx_gmu_load_fw(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	const u8 *fw = (const u8 *)gmu->fw_image->data;

	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev))
		return _load_legacy_gmu_fw(KGSL_DEVICE(adreno_dev), gmu);
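
	/*
	 * Added note (for illustration): the non-legacy firmware image is a
	 * sequence of blocks, each a struct gmu_block_header (carrying at
	 * least the target GMU address and payload size) immediately followed
	 * by blk->size bytes of payload. Zero-size blocks are header-only and
	 * are skipped in the loop below.
	 */
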
	while (fw < gmu->fw_image->data + gmu->fw_image->size) {
		const struct gmu_block_header *blk =
			(const struct gmu_block_header *)fw;
		int id;

		fw += sizeof(*blk);

		/* Don't deal with zero size blocks */
		if (blk->size == 0)
			continue;

		id = find_vma_block(gmu, blk->addr, blk->size);
		if (id < 0) {
			dev_err(&gmu->pdev->dev,
				"Unknown block in GMU FW addr:0x%x size:0x%x\n",
				blk->addr, blk->size);
			return -EINVAL;
		}

		if (id == GMU_ITCM) {
			load_tcm(adreno_dev, fw,
				A6XX_GMU_CM3_ITCM_START,
				gmu->vma[GMU_ITCM].start, blk);
		} else if (id == GMU_DTCM) {
			load_tcm(adreno_dev, fw,
				A6XX_GMU_CM3_DTCM_START,
				gmu->vma[GMU_DTCM].start, blk);
		} else {
			struct kgsl_memdesc *md =
				find_gmu_memdesc(gmu, blk->addr, blk->size);

			if (!md) {
				dev_err(&gmu->pdev->dev,
					"No backing memory for GMU FW block addr:0x%x size:0x%x\n",
					blk->addr, blk->size);
				return -EINVAL;
			}

			memcpy(md->hostptr + (blk->addr - md->gmuaddr), fw,
				blk->size);
		}

		fw += blk->size;
	}

	/* Proceed only after the FW is written */
	wmb();

	return 0;
}

static const char *oob_to_str(enum oob_request req)
{
	if (req == oob_gpu)
		return "oob_gpu";
	else if (req == oob_perfcntr)
		return "oob_perfcntr";
	else if (req == oob_boot_slumber)
		return "oob_boot_slumber";
	else if (req == oob_dcvs)
		return "oob_dcvs";
	return "unknown";
}

static void trigger_reset_recovery(struct adreno_device *adreno_dev,
	enum oob_request req)
{
	/*
	 * Trigger recovery for perfcounter oob only since only
	 * perfcounter oob can happen alongside an actively rendering gpu.
	 */
	if (req != oob_perfcntr)
		return;

	if (adreno_dev->dispatch_ops && adreno_dev->dispatch_ops->fault)
		adreno_dev->dispatch_ops->fault(adreno_dev,
			ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
}

int a6xx_gmu_oob_set(struct kgsl_device *device,
		enum oob_request req)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret = 0;
	int set, check;

	if (req == oob_perfcntr && gmu->num_oob_perfcntr++)
		return 0;

	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
		set = BIT(req + 16);
		check = BIT(req + 24);
	} else {
		/*
		 * The legacy targets have special bits that aren't supported on
		 * newer implementations
		 */
		if (req >= oob_boot_slumber) {
			dev_err(&gmu->pdev->dev,
				"Unsupported OOB request %s\n",
				oob_to_str(req));
			return -EINVAL;
		}

		set = BIT(30 - req * 2);
		check = BIT(31 - req);
	}
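
	/*
	 * Worked example (for illustration), assuming oob_gpu is the first
	 * enumerator (value 0): on a630/a615-class parts the request bit is
	 * BIT(16) and the ack bit BIT(24), while on newer parts the request
	 * bit is BIT(30) and the ack bit BIT(31). oob_perfcntr (value 1)
	 * maps to BIT(17)/BIT(25) and BIT(28)/BIT(30) respectively.
	 */
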
	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, set);

	if (gmu_core_timed_poll_check(device, A6XX_GMU_GMU2HOST_INTR_INFO,
			check, GPU_START_TIMEOUT, check)) {
		if (req == oob_perfcntr)
			gmu->num_oob_perfcntr--;
		gmu_core_fault_snapshot(device);
		ret = -ETIMEDOUT;
		WARN(1, "OOB request %s timed out\n", oob_to_str(req));
		trigger_reset_recovery(adreno_dev, req);
	}

	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, check);

	trace_kgsl_gmu_oob_set(set);
	return ret;
}

void a6xx_gmu_oob_clear(struct kgsl_device *device,
		enum oob_request req)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int clear;

	if (req == oob_perfcntr && --gmu->num_oob_perfcntr)
		return;

	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
		clear = BIT(req + 24);
	} else {
		clear = BIT(31 - req * 2);
		if (req >= oob_boot_slumber) {
			dev_err(&gmu->pdev->dev, "Unsupported OOB clear %s\n",
				oob_to_str(req));
			return;
		}
	}

	gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, clear);
	trace_kgsl_gmu_oob_clear(clear);
}

void a6xx_gmu_irq_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hfi *hfi = &gmu->hfi;

	/* Clear pending IRQs and Unmask needed IRQs */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);

	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
		(unsigned int)~HFI_IRQ_MASK);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK,
		(unsigned int)~GMU_AO_INT_MASK);

	/* Enable all IRQs on host */
	enable_irq(hfi->irq);
	enable_irq(gmu->irq);
}

void a6xx_gmu_irq_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hfi *hfi = &gmu->hfi;

	/* Disable all IRQs on host */
	disable_irq(gmu->irq);
	disable_irq(hfi->irq);

	/* Mask all IRQs and clear pending IRQs */
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR, 0xffffffff);
	gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, 0xffffffff);
}

static int a6xx_gmu_hfi_start_msg(struct adreno_device *adreno_dev)
{
	struct hfi_start_cmd req;

	/*
	 * This HFI was not supported in legacy firmware and this quirk
	 * serves as a better means to identify targets that depend on
	 * legacy firmware.
	 */
	if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
		int ret;

		ret = CMD_MSG_HDR(req, H2F_MSG_START);
		if (ret)
			return ret;

		return a6xx_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
	}

	return 0;
}

#define FREQ_VOTE(idx, ack) (((idx) & 0xFF) | (((ack) & 0xF) << 28))
#define BW_VOTE(idx) ((((idx) & 0xFFF) << 12) | ((idx) & 0xFFF))

#define CLKSET_OPTION_ATLEAST 3
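
/*
 * Worked example (for illustration, not part of the upstream comments): with
 * perf_idx = 2 the vote written below is FREQ_VOTE(2, CLKSET_OPTION_ATLEAST)
 * = 0x2 | (0x3 << 28) = 0x30000002, i.e. the performance index in the low
 * byte and the clock-set option in the top nibble. BW_VOTE(1) mirrors the
 * bandwidth index into both 12-bit fields, giving 0x00001001.
 */
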
/*
 * a6xx_gmu_dcvs_nohfi() - request GMU to do DCVS without using HFI
 * @device: Pointer to KGSL device
 * @perf_idx: Index into GPU performance level table defined in
 *	HFI DCVS table message
 * @bw_idx: Index into GPU b/w table defined in HFI b/w table message
 *
 */
static int a6xx_gmu_dcvs_nohfi(struct kgsl_device *device,
		unsigned int perf_idx, unsigned int bw_idx)
{
	int ret;

	gmu_core_regwrite(device, A6XX_GMU_DCVS_ACK_OPTION, DCVS_ACK_NONBLOCK);

	gmu_core_regwrite(device, A6XX_GMU_DCVS_PERF_SETTING,
		FREQ_VOTE(perf_idx, CLKSET_OPTION_ATLEAST));

	gmu_core_regwrite(device, A6XX_GMU_DCVS_BW_SETTING, BW_VOTE(bw_idx));

	ret = a6xx_gmu_oob_set(device, oob_dcvs);
	if (ret == 0)
		gmu_core_regread(device, A6XX_GMU_DCVS_RETURN, &ret);

	a6xx_gmu_oob_clear(device, oob_dcvs);

	return ret;
}

static u32 a6xx_rscc_tcsm_drv0_status_reglist[] = {
	A6XX_RSCC_TCS0_DRV0_STATUS,
	A6XX_RSCC_TCS1_DRV0_STATUS,
	A6XX_RSCC_TCS2_DRV0_STATUS,
	A6XX_RSCC_TCS3_DRV0_STATUS,
	A6XX_RSCC_TCS4_DRV0_STATUS,
	A6XX_RSCC_TCS5_DRV0_STATUS,
	A6XX_RSCC_TCS6_DRV0_STATUS,
	A6XX_RSCC_TCS7_DRV0_STATUS,
	A6XX_RSCC_TCS8_DRV0_STATUS,
	A6XX_RSCC_TCS9_DRV0_STATUS,
};

static int a6xx_complete_rpmh_votes(struct adreno_device *adreno_dev,
	unsigned int timeout)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	/* Number of TCS commands are increased to 10 from A650 family onwards */
	int count = adreno_is_a650_family(adreno_dev) ?
			ARRAY_SIZE(a6xx_rscc_tcsm_drv0_status_reglist) : 4;
	int i, ret = 0;

	for (i = 0; i < count; i++)
		ret |= timed_poll_check_rscc(device, a6xx_rscc_tcsm_drv0_status_reglist[i],
				BIT(0), timeout, BIT(0));

	if (ret)
		dev_err(device->dev, "RPMH votes timedout: %d\n", ret);

	return ret;
}

#define SPTPRAC_CTRL_TIMEOUT	10 /* ms */

/*
 * a6xx_gmu_sptprac_enable() - Power on SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
int a6xx_gmu_sptprac_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	/* Only certain targets have sptprac */
	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev))
		return 0;

	if (test_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED, &adreno_dev->priv))
		return 0;

	/* GMU enabled a630 and a615 targets */
	gmu_core_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
		SPTPRAC_POWERON_CTRL_MASK);

	if (gmu_core_timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWERON_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWERON_STATUS_MASK)) {
		dev_err(&gmu->pdev->dev, "power on SPTPRAC fail\n");
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}

	set_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED, &adreno_dev->priv);

	return 0;
}

/*
 * a6xx_gmu_sptprac_disable() - Power off SPTPRAC
 * @adreno_dev: Pointer to Adreno device
 */
void a6xx_gmu_sptprac_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	/* Only certain targets have sptprac */
	if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev))
		return;

	if (!test_and_clear_bit(ADRENO_DEVICE_GPU_REGULATOR_ENABLED,
		&adreno_dev->priv))
		return;

	/* GMU enabled a630 and a615 targets */

	/* Ensure that retention is on */
	gmu_core_regrmw(device, A6XX_GPU_CC_GX_GDSCR, 0,
		A6XX_RETAIN_FF_ENABLE_ENABLE_MASK);

	gmu_core_regwrite(device, A6XX_GMU_GX_SPTPRAC_POWER_CONTROL,
		SPTPRAC_POWEROFF_CTRL_MASK);

	if (gmu_core_timed_poll_check(device,
			A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
			SPTPRAC_POWEROFF_STATUS_MASK,
			SPTPRAC_CTRL_TIMEOUT,
			SPTPRAC_POWEROFF_STATUS_MASK))
		dev_err(&gmu->pdev->dev, "power off SPTPRAC fail\n");
}

#define SPTPRAC_POWER_OFF	BIT(2)
#define SP_CLK_OFF		BIT(4)
#define GX_GDSC_POWER_OFF	BIT(6)
#define GX_CLK_OFF		BIT(7)
#define is_on(val)		(!(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)))
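
/*
 * Clarifying note (added, for illustration): is_on() treats GX as "on" only
 * when both the GX GDSC power-off bit and the GX clock-off bit are clear in
 * A6XX_GMU_SPTPRAC_PWR_CLK_STATUS; for example a status value of GX_CLK_OFF
 * (BIT(7)) alone is enough for is_on() to report false. The SPTPRAC-specific
 * bits are checked separately in a6xx_gmu_sptprac_is_on().
 */
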
  965. bool a6xx_gmu_gx_is_on(struct adreno_device *adreno_dev)
  966. {
  967. unsigned int val;
  968. gmu_core_regread(KGSL_DEVICE(adreno_dev),
  969. A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
  970. return is_on(val);
  971. }
  972. bool a619_holi_gx_is_on(struct adreno_device *adreno_dev)
  973. {
  974. unsigned int val;
  975. gmu_core_regread(KGSL_DEVICE(adreno_dev),
  976. A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
  977. return is_on(val);
  978. }
  979. /*
  980. * a6xx_gmu_sptprac_is_on() - Check if SPTP is on using pwr status register
  981. * @adreno_dev - Pointer to adreno_device
  982. * This check should only be performed if the keepalive bit is set or it
  983. * can be guaranteed that the power state of the GPU will remain unchanged
  984. */
  985. bool a6xx_gmu_sptprac_is_on(struct adreno_device *adreno_dev)
  986. {
  987. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  988. unsigned int val;
  989. if (!adreno_is_a630(adreno_dev) && !adreno_is_a615_family(adreno_dev))
  990. return true;
  991. if (adreno_is_a619_holi(adreno_dev))
  992. kgsl_regread(device,
  993. A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
  994. else
  995. gmu_core_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS,
  996. &val);
  997. return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF));
  998. }
  999. /*
  1000. * a6xx_gmu_gfx_rail_on() - request GMU to power GPU at given OPP.
  1001. * @device: Pointer to KGSL device
  1002. *
  1003. */
  1004. static int a6xx_gmu_gfx_rail_on(struct adreno_device *adreno_dev)
  1005. {
  1006. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1007. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1008. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1009. u32 perf_idx = gmu->hfi.dcvs_table.gpu_level_num -
  1010. pwr->default_pwrlevel - 1;
  1011. u32 default_opp = gmu->hfi.dcvs_table.gx_votes[perf_idx].vote;
  1012. gmu_core_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
  1013. OOB_BOOT_OPTION);
  1014. gmu_core_regwrite(device, A6XX_GMU_GX_VOTE_IDX,
  1015. ARC_VOTE_GET_PRI(default_opp));
  1016. gmu_core_regwrite(device, A6XX_GMU_MX_VOTE_IDX,
  1017. ARC_VOTE_GET_SEC(default_opp));
  1018. a6xx_rdpm_mx_freq_update(gmu,
  1019. gmu->hfi.dcvs_table.gx_votes[perf_idx].freq);
  1020. return a6xx_gmu_oob_set(device, oob_boot_slumber);
  1021. }
  1022. static bool idle_trandition_complete(unsigned int idle_level,
  1023. unsigned int gmu_power_reg,
  1024. unsigned int sptprac_clk_reg)
  1025. {
  1026. if (idle_level != gmu_power_reg)
  1027. return false;
  1028. if (idle_level == GPU_HW_IFPC && is_on(sptprac_clk_reg))
  1029. return false;
  1030. return true;
  1031. }
  1032. static const char *idle_level_name(int level)
  1033. {
  1034. if (level == GPU_HW_ACTIVE)
  1035. return "GPU_HW_ACTIVE";
  1036. else if (level == GPU_HW_IFPC)
  1037. return "GPU_HW_IFPC";
  1038. return "";
  1039. }
  1040. int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
  1041. {
  1042. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1043. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1044. unsigned int reg, reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8;
  1045. unsigned long t;
  1046. uint64_t ts1, ts2, ts3;
  1047. ts1 = a6xx_read_alwayson(adreno_dev);
  1048. t = jiffies + msecs_to_jiffies(GMU_IDLE_TIMEOUT);
  1049. do {
  1050. gmu_core_regread(device,
  1051. A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
  1052. gmu_core_regread(device,
  1053. A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
  1054. if (idle_trandition_complete(gmu->idle_level, reg, reg1))
  1055. return 0;
  1056. /* Wait 100us to reduce unnecessary AHB bus traffic */
  1057. usleep_range(10, 100);
  1058. } while (!time_after(jiffies, t));
  1059. ts2 = a6xx_read_alwayson(adreno_dev);
  1060. /* Check one last time */
  1061. gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
  1062. gmu_core_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &reg1);
if (idle_transition_complete(gmu->idle_level, reg, reg1))
  1064. return 0;
  1065. ts3 = a6xx_read_alwayson(adreno_dev);
  1066. /* Collect abort data to help with debugging */
  1067. gmu_core_regread(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg2);
  1068. gmu_core_regread(device, A6XX_GMU_RBBM_INT_UNMASKED_STATUS, &reg3);
  1069. gmu_core_regread(device, A6XX_GMU_GMU_PWR_COL_KEEPALIVE, &reg4);
  1070. gmu_core_regread(device, A6XX_GMU_AO_SPARE_CNTL, &reg5);
  1071. dev_err(&gmu->pdev->dev,
  1072. "----------------------[ GMU error ]----------------------\n");
  1073. dev_err(&gmu->pdev->dev,
  1074. "Timeout waiting for lowest idle level %s\n",
  1075. idle_level_name(gmu->idle_level));
  1076. dev_err(&gmu->pdev->dev, "Start: %llx (absolute ticks)\n", ts1);
  1077. dev_err(&gmu->pdev->dev, "Poll: %llx (ticks relative to start)\n",
  1078. ts2-ts1);
  1079. dev_err(&gmu->pdev->dev, "Retry: %llx (ticks relative to poll)\n",
  1080. ts3-ts2);
  1081. dev_err(&gmu->pdev->dev,
  1082. "RPMH_POWER_STATE=%x SPTPRAC_PWR_CLK_STATUS=%x\n", reg, reg1);
  1083. dev_err(&gmu->pdev->dev, "CX_BUSY_STATUS=%x\n", reg2);
  1084. dev_err(&gmu->pdev->dev,
  1085. "RBBM_INT_UNMASKED_STATUS=%x PWR_COL_KEEPALIVE=%x\n",
  1086. reg3, reg4);
  1087. dev_err(&gmu->pdev->dev, "A6XX_GMU_AO_SPARE_CNTL=%x\n", reg5);
  1088. if (adreno_is_a660(adreno_dev)) {
  1089. u32 val;
  1090. gmu_core_regread(device, A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, &val);
  1091. dev_err(&gmu->pdev->dev, "PWR_COL_PREEMPT_KEEPALIVE=%x\n", val);
  1092. }
  1093. /* Access GX registers only when GX is ON */
  1094. if (is_on(reg1)) {
  1095. kgsl_regread(device, A6XX_CP_STATUS_1, &reg6);
  1096. kgsl_regread(device, A6XX_CP_CP2GMU_STATUS, &reg7);
  1097. kgsl_regread(device, A6XX_CP_CONTEXT_SWITCH_CNTL, &reg8);
  1098. dev_err(&gmu->pdev->dev, "A6XX_CP_STATUS_1=%x\n", reg6);
  1099. dev_err(&gmu->pdev->dev,
  1100. "CP2GMU_STATUS=%x CONTEXT_SWITCH_CNTL=%x\n",
  1101. reg7, reg8);
  1102. }
  1103. WARN_ON(1);
  1104. gmu_core_fault_snapshot(device);
  1105. return -ETIMEDOUT;
  1106. }
  1107. /* Bitmask for GPU idle status check */
  1108. #define CXGXCPUBUSYIGNAHB BIT(30)
  1109. int a6xx_gmu_wait_for_idle(struct adreno_device *adreno_dev)
  1110. {
  1111. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1112. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1113. unsigned int status2;
  1114. uint64_t ts1;
  1115. ts1 = a6xx_read_alwayson(adreno_dev);
  1116. if (gmu_core_timed_poll_check(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
  1117. 0, GMU_START_TIMEOUT, CXGXCPUBUSYIGNAHB)) {
  1118. gmu_core_regread(device,
  1119. A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
  1120. dev_err(&gmu->pdev->dev,
  1121. "GMU not idling: status2=0x%x %llx %llx\n",
  1122. status2, ts1,
  1123. a6xx_read_alwayson(ADRENO_DEVICE(device)));
  1124. gmu_core_fault_snapshot(device);
  1125. return -ETIMEDOUT;
  1126. }
  1127. return 0;
  1128. }
  1129. /* A6xx GMU FENCE RANGE MASK */
  1130. #define GMU_FENCE_RANGE_MASK ((0x1 << 31) | ((0xA << 2) << 18) | (0x8A0))
  1131. void a6xx_gmu_version_info(struct adreno_device *adreno_dev)
  1132. {
  1133. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1134. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1135. /* GMU version info is at a fixed offset in the DTCM */
  1136. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFF8,
  1137. &gmu->ver.core);
  1138. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFF9,
  1139. &gmu->ver.core_dev);
  1140. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFA,
  1141. &gmu->ver.pwr);
  1142. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFB,
  1143. &gmu->ver.pwr_dev);
  1144. gmu_core_regread(device, A6XX_GMU_CM3_DTCM_START + 0xFFC,
  1145. &gmu->ver.hfi);
  1146. }
  1147. int a6xx_gmu_itcm_shadow(struct adreno_device *adreno_dev)
  1148. {
  1149. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1150. u32 i, *dest;
  1151. if (gmu->itcm_shadow)
  1152. return 0;
  1153. gmu->itcm_shadow = vzalloc(gmu->vma[GMU_ITCM].size);
  1154. if (!gmu->itcm_shadow)
  1155. return -ENOMEM;
  1156. dest = (u32 *)gmu->itcm_shadow;
  1157. /* FIXME: use bulk read? */
  1158. for (i = 0; i < (gmu->vma[GMU_ITCM].size >> 2); i++)
  1159. gmu_core_regread(KGSL_DEVICE(adreno_dev),
  1160. A6XX_GMU_CM3_ITCM_START + i, dest++);
  1161. return 0;
  1162. }
  1163. static void a6xx_gmu_enable_throttle_counters(
  1164. struct adreno_device *adreno_dev)
  1165. {
  1166. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1167. u32 val;
  1168. if (!(adreno_dev->lm_enabled || adreno_dev->bcl_enabled))
  1169. return;
  1170. if (adreno_dev->lm_enabled) {
  1171. /*
  1172. * For LM throttling -
  1173. * XOCLK1: countable: 0x10
  1174. * XOCLK2: countable: 0x16 for newer hardware / 0x15 for others
  1175. * XOCLK3: countable: 0xf for newer hardware / 0x19 for others
  1176. *
  1177. * POWER_CONTROL_SELECT_0 controls counters 0 - 3, each selector
  1178. * is 8 bits wide.
  1179. */
  1180. if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
  1181. val = (0x10 << 8) | (0x16 << 16) | (0x0f << 24);
  1182. else
  1183. val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
  1184. } else {
  1185. /*
  1186. * When LM is not enabled, we can enable BCL throttling -
  1187. * XOCLK1: countable: 0x13 (25% throttle)
  1188. * XOCLK2: countable: 0x17 (58% throttle)
  1189. * XOCLK3: countable: 0x19 (75% throttle)
  1190. *
  1191. * POWER_CONTROL_SELECT_0 controls counters 0 - 3, each selector
  1192. * is 8 bits wide.
  1193. */
  1194. val = (0x13 << 8) | (0x17 << 16) | (0x19 << 24);
  1195. }
  1196. /* Make sure not to write over XOCLK0 */
  1197. gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
  1198. 0xffffff00, val);
  1199. gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
  1200. }
  1201. void a6xx_gmu_register_config(struct adreno_device *adreno_dev)
  1202. {
  1203. const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
  1204. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1205. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1206. u32 gmu_log_info, chipid = 0;
  1207. /* Clear any previously set cm3 fault */
  1208. atomic_set(&gmu->cm3_fault, 0);
/* Vote veto for FAL10 feature if supported */
  1210. if (a6xx_core->veto_fal10) {
  1211. gmu_core_regwrite(device,
  1212. A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
  1213. gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 0x1);
  1214. }
  1215. /* Turn on TCM retention */
  1216. gmu_core_regwrite(device, A6XX_GMU_GENERAL_7, 1);
  1217. /* Clear init result to make sure we are getting fresh value */
  1218. gmu_core_regwrite(device, A6XX_GMU_CM3_FW_INIT_RESULT, 0);
  1219. gmu_core_regwrite(device, A6XX_GMU_CM3_BOOT_CONFIG, 0x2);
  1220. gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_ADDR,
  1221. gmu->hfi.hfi_mem->gmuaddr);
  1222. gmu_core_regwrite(device, A6XX_GMU_HFI_QTBL_INFO, 1);
/*
* On A6xx, GMU AO interrupt line BIT[1] is shared between IPCC and the
* doorbell. Enable the dbdWakeupEn interrupt so the GMU can receive
* IPC interrupts.
*/
  1228. if (ADRENO_FEATURE(adreno_dev, ADRENO_LSR))
  1229. gmu_core_regwrite(device, A6XX_GMU_AO_INTERRUPT_EN, BIT(1));
  1230. gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
  1231. GMU_FENCE_RANGE_MASK);
/*
* Make sure that the CM3 state is at its reset value. Snapshot changes the
* NMI bit, and if we boot the GMU with the NMI bit set, it will boot
* straight into the NMI handler without executing the __main code.
*/
  1237. gmu_core_regwrite(device, A6XX_GMU_CM3_CFG, 0x4052);
/*
  1239. * We may have asserted gbif halt as part of reset sequence which may
  1240. * not get cleared if the gdsc was not reset. So clear it before
  1241. * attempting GMU boot.
  1242. */
  1243. if (!adreno_is_a630(adreno_dev))
  1244. kgsl_regwrite(device, A6XX_GBIF_HALT, 0x0);
  1245. /* Set vrb address before starting GMU */
  1246. if (!IS_ERR_OR_NULL(gmu->vrb))
  1247. gmu_core_regwrite(device, A6XX_GMU_GENERAL_11, gmu->vrb->gmuaddr);
  1248. /* Set the log wptr index */
  1249. gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
  1250. gmu->log_wptr_retention);
  1251. /* Pass chipid to GMU FW, must happen before starting GMU */
  1252. chipid = ADRENO_GMU_CHIPID(adreno_dev->chipid);
  1253. /*
  1254. * For A660 GPU variant, GMU firmware expects chipid as per below
  1255. * format to differentiate between A660 and A660 variant. In device
  1256. * tree, target version is specified as high nibble of patch to align
  1257. * with usermode driver expectation. Format the chipid according to
  1258. * firmware requirement.
  1259. *
  1260. * Bit 11-8: patch version
  1261. * Bit 15-12: minor version
  1262. * Bit 23-16: major version
  1263. * Bit 27-24: core version
  1264. * Bit 31-28: target version
  1265. */
  1266. if (adreno_is_a660_shima(adreno_dev))
  1267. chipid |= ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) >> 4) << 28);
  1268. gmu_core_regwrite(device, A6XX_GMU_HFI_SFR_ADDR, chipid);
  1269. /* Log size is encoded in (number of 4K units - 1) */
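/*
* Example: a 64 KB log encodes as (64K / 4K) - 1 = 0xF in the low byte,
* with the upper bits carrying the 4K-aligned GMU address of the log.
*/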
  1270. gmu_log_info = (gmu->gmu_log->gmuaddr & 0xFFFFF000) |
  1271. ((GMU_LOG_SIZE/SZ_4K - 1) & 0xFF);
  1272. gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
  1273. gmu_log_info);
  1274. /* Configure power control and bring the GMU out of reset */
  1275. a6xx_gmu_power_config(adreno_dev);
  1276. a6xx_gmu_enable_throttle_counters(adreno_dev);
  1277. }
  1278. struct kgsl_memdesc *reserve_gmu_kernel_block(struct a6xx_gmu_device *gmu,
  1279. u32 addr, u32 size, u32 vma_id, u32 align)
  1280. {
  1281. int ret;
  1282. struct kgsl_memdesc *md;
  1283. struct gmu_vma_entry *vma = &gmu->vma[vma_id];
  1284. struct kgsl_device *device = KGSL_DEVICE(a6xx_gmu_to_adreno(gmu));
  1285. u32 aligned_size = ALIGN(size, hfi_get_gmu_sz_alignment(align));
  1286. if (gmu->global_entries == ARRAY_SIZE(gmu->gmu_globals))
  1287. return ERR_PTR(-ENOMEM);
  1288. md = &gmu->gmu_globals[gmu->global_entries];
  1289. ret = kgsl_allocate_kernel(device, md, size, 0, KGSL_MEMDESC_SYSMEM);
  1290. if (ret) {
  1291. memset(md, 0x0, sizeof(*md));
  1292. return ERR_PTR(-ENOMEM);
  1293. }
  1294. if (!addr)
  1295. addr = ALIGN(vma->next_va, hfi_get_gmu_va_alignment(align));
  1296. ret = gmu_core_map_memdesc(gmu->domain, md, addr,
  1297. IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
  1298. if (ret) {
  1299. dev_err(&gmu->pdev->dev,
  1300. "Unable to map GMU kernel block: addr:0x%08x size:0x%llx :%d\n",
  1301. addr, md->size, ret);
  1302. kgsl_sharedmem_free(md);
  1303. memset(md, 0, sizeof(*md));
  1304. return ERR_PTR(-ENOMEM);
  1305. }
  1306. md->gmuaddr = addr;
  1307. /* Take into account the size alignment when reserving the GMU VA */
  1308. vma->next_va = md->gmuaddr + aligned_size;
  1309. gmu->global_entries++;
  1310. return md;
  1311. }
  1312. struct kgsl_memdesc *reserve_gmu_kernel_block_fixed(struct a6xx_gmu_device *gmu,
  1313. u32 addr, u32 size, u32 vma_id, const char *resource, int attrs, u32 align)
  1314. {
  1315. int ret;
  1316. struct kgsl_memdesc *md;
  1317. struct gmu_vma_entry *vma = &gmu->vma[vma_id];
  1318. struct kgsl_device *device = KGSL_DEVICE(a6xx_gmu_to_adreno(gmu));
  1319. u32 aligned_size = ALIGN(size, hfi_get_gmu_sz_alignment(align));
  1320. if (gmu->global_entries == ARRAY_SIZE(gmu->gmu_globals))
  1321. return ERR_PTR(-ENOMEM);
  1322. md = &gmu->gmu_globals[gmu->global_entries];
  1323. ret = kgsl_memdesc_init_fixed(device, gmu->pdev, resource, md);
  1324. if (ret)
  1325. return ERR_PTR(ret);
  1326. if (!addr)
  1327. addr = ALIGN(vma->next_va, hfi_get_gmu_va_alignment(align));
if ((vma->next_va + aligned_size) > (vma->start + vma->size)) {
dev_err(&gmu->pdev->dev,
"GMU mapping too big. available: %d required: %d\n",
vma->next_va - vma->start, aligned_size);
ret = -ENOMEM;
goto done;
}
ret = gmu_core_map_memdesc(gmu->domain, md, addr, attrs);
if (ret) {
dev_err(&gmu->pdev->dev,
"Unable to map GMU kernel block: addr:0x%08x size:0x%llx :%d\n",
addr, md->size, ret);
ret = -ENOMEM;
goto done;
}
md->gmuaddr = addr;
/* Take into account the size alignment when reserving the GMU VA */
vma->next_va = md->gmuaddr + aligned_size;
gmu->global_entries++;
done:
/*
* Keep md pointing at the global entry so its scatterlist can be freed on
* both the success and error paths, then return an ERR_PTR on failure.
*/
sg_free_table(md->sgt);
kfree(md->sgt);
md->sgt = NULL;
return ret ? ERR_PTR(ret) : md;
  1352. }
  1353. static int reserve_entire_vma(struct a6xx_gmu_device *gmu, u32 vma_id)
  1354. {
  1355. struct kgsl_memdesc *md;
  1356. u32 start = gmu->vma[vma_id].start, size = gmu->vma[vma_id].size;
  1357. md = find_gmu_memdesc(gmu, start, size);
  1358. if (md)
  1359. return 0;
  1360. md = reserve_gmu_kernel_block(gmu, start, size, vma_id, 0);
  1361. return PTR_ERR_OR_ZERO(md);
  1362. }
  1363. static int a6xx_gmu_cache_finalize(struct adreno_device *adreno_dev)
  1364. {
  1365. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1366. struct kgsl_memdesc *md;
  1367. int ret;
  1368. /* Preallocations were made so no need to request all this memory */
  1369. if (gmu->preallocations)
  1370. return 0;
  1371. ret = reserve_entire_vma(gmu, GMU_ICACHE);
  1372. if (ret)
  1373. return ret;
  1374. if (!adreno_is_a650_family(adreno_dev)) {
  1375. ret = reserve_entire_vma(gmu, GMU_DCACHE);
  1376. if (ret)
  1377. return ret;
  1378. }
  1379. md = reserve_gmu_kernel_block(gmu, 0, SZ_4K, GMU_NONCACHED_KERNEL, 0);
  1380. if (IS_ERR(md))
  1381. return PTR_ERR(md);
  1382. gmu->preallocations = true;
  1383. return 0;
  1384. }
  1385. static int a6xx_gmu_process_prealloc(struct a6xx_gmu_device *gmu,
  1386. struct gmu_block_header *blk)
  1387. {
  1388. struct kgsl_memdesc *md;
  1389. int id = find_vma_block(gmu, blk->addr, blk->value);
  1390. if (id < 0) {
  1391. dev_err(&gmu->pdev->dev,
  1392. "Invalid prealloc block addr: 0x%x value:%d\n",
  1393. blk->addr, blk->value);
  1394. return id;
  1395. }
  1396. /* Nothing to do for TCM blocks or user uncached */
  1397. if (id == GMU_ITCM || id == GMU_DTCM || id == GMU_NONCACHED_USER)
  1398. return 0;
  1399. /* Check if the block is already allocated */
  1400. md = find_gmu_memdesc(gmu, blk->addr, blk->value);
  1401. if (md != NULL)
  1402. return 0;
  1403. md = reserve_gmu_kernel_block(gmu, blk->addr, blk->value, id, 0);
  1404. if (IS_ERR(md))
  1405. return PTR_ERR(md);
  1406. gmu->preallocations = true;
  1407. return 0;
  1408. }
  1409. int a6xx_gmu_parse_fw(struct adreno_device *adreno_dev)
  1410. {
  1411. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1412. const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
  1413. struct gmu_block_header *blk;
  1414. int ret, offset = 0;
/* If the GMU fw is already saved and verified, don't request it again */
  1416. if (!gmu->fw_image) {
  1417. if (a6xx_core->gmufw_name == NULL)
  1418. return -EINVAL;
  1419. ret = request_firmware(&gmu->fw_image, a6xx_core->gmufw_name,
  1420. &gmu->pdev->dev);
  1421. if (ret) {
  1422. dev_err(&gmu->pdev->dev, "request_firmware (%s) failed: %d\n",
  1423. a6xx_core->gmufw_name, ret);
  1424. return ret;
  1425. }
  1426. }
  1427. /*
  1428. * Zero payload fw blocks contain metadata and are
  1429. * guaranteed to precede fw load data. Parse the
  1430. * metadata blocks.
  1431. */
  1432. while (offset < gmu->fw_image->size) {
  1433. blk = (struct gmu_block_header *)&gmu->fw_image->data[offset];
  1434. if (offset + sizeof(*blk) > gmu->fw_image->size) {
  1435. dev_err(&gmu->pdev->dev, "Invalid FW Block\n");
  1436. return -EINVAL;
  1437. }
/* Done with the zero length metadata blocks, so stop parsing */
  1439. if (blk->size)
  1440. break;
  1441. offset += sizeof(*blk);
  1442. if (blk->type == GMU_BLK_TYPE_PREALLOC_REQ ||
  1443. blk->type == GMU_BLK_TYPE_PREALLOC_PERSIST_REQ) {
  1444. ret = a6xx_gmu_process_prealloc(gmu, blk);
  1445. if (ret)
  1446. return ret;
  1447. }
  1448. }
  1449. return 0;
  1450. }
  1451. int a6xx_gmu_memory_init(struct adreno_device *adreno_dev)
  1452. {
  1453. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1454. /* Allocates & maps GMU crash dump memory */
  1455. if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
  1456. if (IS_ERR_OR_NULL(gmu->dump_mem))
  1457. gmu->dump_mem = reserve_gmu_kernel_block(gmu, 0, SZ_16K,
  1458. GMU_NONCACHED_KERNEL, 0);
  1459. if (IS_ERR(gmu->dump_mem))
  1460. return PTR_ERR(gmu->dump_mem);
  1461. }
  1462. /* GMU master log */
  1463. if (IS_ERR_OR_NULL(gmu->gmu_log))
  1464. gmu->gmu_log = reserve_gmu_kernel_block(gmu, 0, GMU_LOG_SIZE,
  1465. GMU_NONCACHED_KERNEL, 0);
  1466. return PTR_ERR_OR_ZERO(gmu->gmu_log);
  1467. }
  1468. static int a6xx_gmu_init(struct adreno_device *adreno_dev)
  1469. {
  1470. int ret;
  1471. ret = a6xx_gmu_parse_fw(adreno_dev);
  1472. if (ret)
  1473. return ret;
  1474. /* Request any other cache ranges that might be required */
  1475. ret = a6xx_gmu_cache_finalize(adreno_dev);
  1476. if (ret)
  1477. return ret;
  1478. ret = a6xx_gmu_memory_init(adreno_dev);
  1479. if (ret)
  1480. return ret;
  1481. return a6xx_hfi_init(adreno_dev);
  1482. }
  1483. #define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
  1484. static void a6xx_gmu_pwrctrl_suspend(struct adreno_device *adreno_dev)
  1485. {
  1486. int ret = 0;
  1487. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1488. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1489. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1490. /* If SPTP_RAC is on, turn off SPTP_RAC HS */
  1491. a6xx_gmu_sptprac_disable(adreno_dev);
/* Disconnecting the GPU from the bus is not needed if the CX GDSC goes off later */
  1493. /*
  1494. * GEMNOC can enter power collapse state during GPU power down sequence.
  1495. * This could abort CX GDSC collapse. Assert Qactive to avoid this.
  1496. */
  1497. if ((adreno_is_a662(adreno_dev) || adreno_is_a621(adreno_dev) ||
  1498. adreno_is_a635(adreno_dev)))
  1499. gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
  1500. /* Check no outstanding RPMh voting */
  1501. a6xx_complete_rpmh_votes(adreno_dev, GPU_RESET_TIMEOUT);
  1502. /* Clear the WRITEDROPPED fields and set fence to allow mode */
  1503. gmu_core_regwrite(device, A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
  1504. gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
  1505. /* Make sure above writes are committed before we proceed to recovery */
  1506. wmb();
  1507. gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 1);
  1508. if (!adreno_is_a630(adreno_dev)) {
  1509. /* Halt GX traffic */
  1510. if (a6xx_gmu_gx_is_on(adreno_dev)) {
  1511. kgsl_regwrite(device, A6XX_RBBM_GBIF_HALT,
  1512. A6XX_GBIF_GX_HALT_MASK);
  1513. adreno_wait_for_halt_ack(device,
  1514. A6XX_RBBM_GBIF_HALT_ACK,
  1515. A6XX_GBIF_GX_HALT_MASK);
  1516. }
  1517. /* Halt CX traffic */
  1518. a6xx_halt_gbif(adreno_dev);
  1519. /* De-assert the halts */
  1520. kgsl_regwrite(device, A6XX_GBIF_HALT, 0x0);
  1521. }
  1522. if (a6xx_gmu_gx_is_on(adreno_dev))
  1523. kgsl_regwrite(device, A6XX_RBBM_SW_RESET_CMD, 0x1);
  1524. /* Make sure above writes are posted before turning off power resources */
  1525. wmb();
  1526. /* Allow the software reset to complete */
  1527. udelay(100);
  1528. /*
  1529. * This is based on the assumption that GMU is the only one controlling
  1530. * the GX HS. This code path is the only client voting for GX through
  1531. * the regulator interface.
  1532. */
  1533. if (pwr->gx_gdsc) {
  1534. if (a6xx_gmu_gx_is_on(adreno_dev)) {
/*
* Switch gx gdsc control from GMU to CPU. Force a non-zero
* reference count in the clock driver so that the next disable
* call will turn off the GDSC.
*/
  1540. ret = regulator_enable(pwr->gx_gdsc);
  1541. if (ret)
  1542. dev_err(&gmu->pdev->dev,
  1543. "suspend fail: gx enable %d\n", ret);
/*
* Toggle the loop_en bit across the gx gdsc disable, with a delay
* of 10 XO cycles (~520 ns) before the disable. This is to prevent
* CPR measurements from failing.
*/
  1550. if (adreno_is_a660(adreno_dev)) {
  1551. gmu_core_regrmw(device, A6XX_GPU_CPR_FSM_CTL,
  1552. 1, 0);
  1553. ndelay(520);
  1554. }
  1555. ret = regulator_disable(pwr->gx_gdsc);
  1556. if (ret)
  1557. dev_err(&gmu->pdev->dev,
  1558. "suspend fail: gx disable %d\n", ret);
  1559. if (adreno_is_a660(adreno_dev))
  1560. gmu_core_regrmw(device, A6XX_GPU_CPR_FSM_CTL,
  1561. 1, 1);
  1562. if (a6xx_gmu_gx_is_on(adreno_dev))
  1563. dev_err(&gmu->pdev->dev,
  1564. "gx is stuck on\n");
  1565. }
  1566. }
  1567. }
/*
* a6xx_gmu_notify_slumber() - Initiate a request to the GMU to prepare to slumber
* @adreno_dev: Pointer to the adreno device
*/
  1572. static int a6xx_gmu_notify_slumber(struct adreno_device *adreno_dev)
  1573. {
  1574. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1575. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1576. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1577. int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
  1578. int perf_idx = gmu->hfi.dcvs_table.gpu_level_num -
  1579. pwr->default_pwrlevel - 1;
  1580. int ret, state;
  1581. /* Disable the power counter so that the GMU is not busy */
  1582. gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
  1583. /* Turn off SPTPRAC if we own it */
  1584. if (gmu->idle_level == GPU_HW_ACTIVE)
  1585. a6xx_gmu_sptprac_disable(adreno_dev);
  1586. if (!ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
  1587. struct hfi_prep_slumber_cmd req = {
  1588. .freq = perf_idx,
  1589. .bw = bus_level,
  1590. };
  1591. ret = CMD_MSG_HDR(req, H2F_MSG_PREPARE_SLUMBER);
  1592. if (!ret)
  1593. ret = a6xx_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
  1594. goto out;
  1595. }
  1596. gmu_core_regwrite(device, A6XX_GMU_BOOT_SLUMBER_OPTION,
  1597. OOB_SLUMBER_OPTION);
  1598. gmu_core_regwrite(device, A6XX_GMU_GX_VOTE_IDX, perf_idx);
  1599. gmu_core_regwrite(device, A6XX_GMU_MX_VOTE_IDX, bus_level);
  1600. ret = a6xx_gmu_oob_set(device, oob_boot_slumber);
  1601. a6xx_gmu_oob_clear(device, oob_boot_slumber);
  1602. if (!ret) {
  1603. gmu_core_regread(device,
  1604. A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &state);
  1605. if (state != GPU_HW_SLUMBER) {
  1606. dev_err(&gmu->pdev->dev,
  1607. "Failed to prepare for slumber: 0x%x\n",
  1608. state);
  1609. ret = -ETIMEDOUT;
  1610. }
  1611. }
  1612. out:
  1613. /* Make sure the fence is in ALLOW mode */
  1614. gmu_core_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
  1615. /*
  1616. * GEMNOC can enter power collapse state during GPU power down sequence.
  1617. * This could abort CX GDSC collapse. Assert Qactive to avoid this.
  1618. */
  1619. if ((adreno_is_a662(adreno_dev) || adreno_is_a621(adreno_dev) ||
  1620. adreno_is_a635(adreno_dev)))
  1621. gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
  1622. return ret;
  1623. }
  1624. void a6xx_gmu_suspend(struct adreno_device *adreno_dev)
  1625. {
  1626. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1627. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1628. a6xx_gmu_pwrctrl_suspend(adreno_dev);
  1629. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1630. a6xx_gmu_disable_gdsc(adreno_dev);
  1631. a6xx_rdpm_cx_freq_update(gmu, 0);
  1632. dev_err(&gmu->pdev->dev, "Suspended GMU\n");
  1633. kgsl_pwrctrl_set_state(device, KGSL_STATE_NONE);
  1634. }
  1635. static int a6xx_gmu_dcvs_set(struct adreno_device *adreno_dev,
  1636. int gpu_pwrlevel, int bus_level)
  1637. {
  1638. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1639. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1640. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1641. struct hfi_dcvstable_cmd *table = &gmu->hfi.dcvs_table;
  1642. struct hfi_gx_bw_perf_vote_cmd req = {
  1643. .ack_type = DCVS_ACK_BLOCK,
  1644. .freq = INVALID_DCVS_IDX,
  1645. .bw = INVALID_DCVS_IDX,
  1646. };
  1647. int ret = 0;
  1648. if (!test_bit(GMU_PRIV_HFI_STARTED, &gmu->flags))
  1649. return 0;
/* Do not let the GMU vote for the XO or lower GPU clock levels */
  1651. if ((gpu_pwrlevel != INVALID_DCVS_IDX) &&
  1652. (gpu_pwrlevel >= table->gpu_level_num - 1))
  1653. return -EINVAL;
  1654. if (gpu_pwrlevel < table->gpu_level_num - 1)
  1655. req.freq = table->gpu_level_num - gpu_pwrlevel - 1;
  1656. if (bus_level < pwr->ddr_table_count && bus_level > 0)
  1657. req.bw = bus_level;
  1658. /* GMU will vote for slumber levels through the sleep sequence */
  1659. if ((req.freq == INVALID_DCVS_IDX) &&
  1660. (req.bw == INVALID_DCVS_IDX)) {
  1661. return 0;
  1662. }
  1663. ret = CMD_MSG_HDR(req, H2F_MSG_GX_BW_PERF_VOTE);
  1664. if (ret)
  1665. return ret;
  1666. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
  1667. ret = a6xx_gmu_dcvs_nohfi(device, req.freq, req.bw);
  1668. else
  1669. ret = a6xx_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
  1670. if (ret) {
  1671. dev_err_ratelimited(&gmu->pdev->dev,
  1672. "Failed to set GPU perf idx %u, bw idx %u\n",
  1673. req.freq, req.bw);
/*
* If this was a DCVS request alongside an active GPU, request
* dispatcher-based reset and recovery.
*/
  1678. if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  1679. adreno_dispatcher_fault(adreno_dev, ADRENO_GMU_FAULT |
  1680. ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
  1681. }
  1682. if (req.freq != INVALID_DCVS_IDX)
  1683. a6xx_rdpm_mx_freq_update(gmu,
  1684. gmu->hfi.dcvs_table.gx_votes[req.freq].freq);
  1685. return ret;
  1686. }
  1687. static int a6xx_gmu_clock_set(struct adreno_device *adreno_dev, u32 pwrlevel)
  1688. {
  1689. return a6xx_gmu_dcvs_set(adreno_dev, pwrlevel, INVALID_DCVS_IDX);
  1690. }
  1691. static int a6xx_gmu_ifpc_store(struct kgsl_device *device,
  1692. unsigned int val)
  1693. {
  1694. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1695. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1696. unsigned int requested_idle_level;
  1697. if (!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
  1698. return -EINVAL;
  1699. if (val)
  1700. requested_idle_level = GPU_HW_IFPC;
  1701. else
  1702. requested_idle_level = GPU_HW_ACTIVE;
  1703. if (gmu->idle_level == requested_idle_level)
  1704. return 0;
  1705. /* Power down the GPU before changing the idle level */
  1706. return adreno_power_cycle_u32(adreno_dev, &gmu->idle_level,
  1707. requested_idle_level);
  1708. }
  1709. static unsigned int a6xx_gmu_ifpc_isenabled(struct kgsl_device *device)
  1710. {
  1711. struct a6xx_gmu_device *gmu = to_a6xx_gmu(ADRENO_DEVICE(device));
  1712. return gmu->idle_level == GPU_HW_IFPC;
  1713. }
  1714. /* Send an NMI to the GMU */
  1715. void a6xx_gmu_send_nmi(struct kgsl_device *device, bool force)
  1716. {
  1717. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1718. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1719. u32 val;
  1720. /*
  1721. * Do not send NMI if the SMMU is stalled because GMU will not be able
  1722. * to save cm3 state to DDR.
  1723. */
  1724. if (a6xx_gmu_gx_is_on(adreno_dev) && adreno_smmu_is_stalled(adreno_dev)) {
  1725. dev_err(&gmu->pdev->dev,
  1726. "Skipping NMI because SMMU is stalled\n");
  1727. return;
  1728. }
  1729. if (force)
  1730. goto nmi;
  1731. /*
  1732. * We should not send NMI if there was a CM3 fault reported because we
  1733. * don't want to overwrite the critical CM3 state captured by gmu before
  1734. * it sent the CM3 fault interrupt. Also don't send NMI if GMU reset is
  1735. * already active. We could have hit a GMU assert and NMI might have
  1736. * already been triggered.
  1737. */
  1738. /* make sure we're reading the latest cm3_fault */
  1739. smp_rmb();
  1740. if (atomic_read(&gmu->cm3_fault))
  1741. return;
  1742. gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &val);
  1743. if (val & 0xE00)
  1744. return;
  1745. nmi:
  1746. /* Mask so there's no interrupt caused by NMI */
  1747. gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK, 0xFFFFFFFF);
  1748. /* Make sure the interrupt is masked before causing it */
  1749. wmb();
  1750. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
  1751. gmu_core_regwrite(device,
  1752. A6XX_GMU_NMI_CONTROL_STATUS, 0);
/* This will cause the GMU to save its internal state to DDR */
gmu_core_regread(device, A6XX_GMU_CM3_CFG, &val);
val |= BIT(9);
gmu_core_regwrite(device, A6XX_GMU_CM3_CFG, val);
/* Make sure the NMI is invoked before we proceed */
  1758. wmb();
  1759. /* Wait for the NMI to be handled */
  1760. udelay(200);
  1761. }
  1762. static void a6xx_gmu_cooperative_reset(struct kgsl_device *device)
  1763. {
  1764. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1765. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1766. unsigned int result;
  1767. gmu_core_regwrite(device, A6XX_GMU_CX_GMU_WDOG_CTRL, 0);
  1768. gmu_core_regwrite(device, A6XX_GMU_HOST2GMU_INTR_SET, BIT(17));
  1769. /*
  1770. * After triggering graceful death wait for snapshot ready
  1771. * indication from GMU.
  1772. */
  1773. if (!gmu_core_timed_poll_check(device, A6XX_GMU_CM3_FW_INIT_RESULT,
  1774. 0x800, 2, 0x800))
  1775. return;
  1776. gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &result);
  1777. dev_err(&gmu->pdev->dev,
  1778. "GMU cooperative reset timed out 0x%x\n", result);
/*
* If we don't get a snapshot-ready indication from the GMU, trigger an
* NMI; if that also times out, just continue with the reset.
*/
  1783. a6xx_gmu_send_nmi(device, true);
  1784. gmu_core_regread(device, A6XX_GMU_CM3_FW_INIT_RESULT, &result);
  1785. if ((result & 0x800) != 0x800)
  1786. dev_err(&gmu->pdev->dev,
  1787. "GMU cooperative reset NMI timed out 0x%x\n", result);
  1788. }
  1789. static int a6xx_gmu_wait_for_active_transition(
  1790. struct kgsl_device *device)
  1791. {
  1792. unsigned int reg;
  1793. struct a6xx_gmu_device *gmu = to_a6xx_gmu(ADRENO_DEVICE(device));
  1794. if (!gmu_core_isenabled(device))
  1795. return 0;
  1796. if (gmu_core_timed_poll_check(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE,
  1797. GPU_HW_ACTIVE, 100, GENMASK(3, 0))) {
  1798. gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
  1799. dev_err(&gmu->pdev->dev,
  1800. "GMU failed to move to ACTIVE state, Current state: 0x%x\n",
  1801. reg);
  1802. return -ETIMEDOUT;
  1803. }
  1804. return 0;
  1805. }
  1806. static bool a6xx_gmu_scales_bandwidth(struct kgsl_device *device)
  1807. {
  1808. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1809. return (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640);
  1810. }
  1811. void a6xx_gmu_handle_watchdog(struct adreno_device *adreno_dev)
  1812. {
  1813. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1814. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1815. u32 mask;
  1816. /* Temporarily mask the watchdog interrupt to prevent a storm */
  1817. gmu_core_regread(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK,
  1818. &mask);
  1819. gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_MASK,
  1820. (mask | GMU_INT_WDOG_BITE));
  1821. a6xx_gmu_send_nmi(device, false);
  1822. dev_err_ratelimited(&gmu->pdev->dev,
  1823. "GMU watchdog expired interrupt received\n");
  1824. }
  1825. static irqreturn_t a6xx_gmu_irq_handler(int irq, void *data)
  1826. {
  1827. struct kgsl_device *device = data;
  1828. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1829. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1830. const struct a6xx_gpudev *a6xx_gpudev =
  1831. to_a6xx_gpudev(ADRENO_GPU_DEVICE(adreno_dev));
  1832. unsigned int status = 0;
  1833. gmu_core_regread(device, A6XX_GMU_AO_HOST_INTERRUPT_STATUS, &status);
  1834. gmu_core_regwrite(device, A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
/* Ignore GMU_INT_RSCC_COMP and GMU_INT_DBD_WAKEUP interrupts */
  1836. if (status & GMU_INT_WDOG_BITE)
  1837. a6xx_gpudev->handle_watchdog(adreno_dev);
  1838. if (status & GMU_INT_HOST_AHB_BUS_ERR)
  1839. dev_err_ratelimited(&gmu->pdev->dev,
  1840. "AHB bus error interrupt received\n");
  1841. if (status & GMU_INT_FENCE_ERR) {
  1842. unsigned int fence_status;
  1843. gmu_core_regread(device, A6XX_GMU_AHB_FENCE_STATUS,
  1844. &fence_status);
  1845. dev_err_ratelimited(&gmu->pdev->dev,
  1846. "FENCE error interrupt received %x\n", fence_status);
  1847. }
  1848. if (status & ~GMU_AO_INT_MASK)
  1849. dev_err_ratelimited(&gmu->pdev->dev,
  1850. "Unhandled GMU interrupts 0x%lx\n",
  1851. status & ~GMU_AO_INT_MASK);
  1852. return IRQ_HANDLED;
  1853. }
  1854. void a6xx_gmu_snapshot(struct adreno_device *adreno_dev,
  1855. struct kgsl_snapshot *snapshot)
  1856. {
  1857. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1858. a6xx_gmu_device_snapshot(device, snapshot);
  1859. a6xx_snapshot(adreno_dev, snapshot);
  1860. gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_CLR,
  1861. 0xffffffff);
  1862. gmu_core_regwrite(device, A6XX_GMU_GMU2HOST_INTR_MASK,
  1863. HFI_IRQ_MASK);
  1864. }
  1865. void a6xx_gmu_aop_send_acd_state(struct a6xx_gmu_device *gmu, bool flag)
  1866. {
  1867. struct qmp_pkt msg;
  1868. char msg_buf[36];
  1869. u32 size;
  1870. int ret;
  1871. if (IS_ERR_OR_NULL(gmu->mailbox.channel))
  1872. return;
  1873. size = scnprintf(msg_buf, sizeof(msg_buf),
  1874. "{class: gpu, res: acd, val: %d}", flag);
  1875. /* mailbox controller expects 4-byte aligned buffer */
  1876. msg.size = ALIGN((size + 1), SZ_4);
  1877. msg.data = msg_buf;
  1878. ret = mbox_send_message(gmu->mailbox.channel, &msg);
  1879. if (ret < 0)
  1880. dev_err(&gmu->pdev->dev,
  1881. "AOP mbox send message failed: %d\n", ret);
  1882. }
  1883. int a6xx_gmu_enable_clks(struct adreno_device *adreno_dev, u32 level)
  1884. {
  1885. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1886. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1887. int ret;
  1888. a6xx_rdpm_cx_freq_update(gmu, gmu->freqs[level] / 1000);
  1889. ret = kgsl_clk_set_rate(gmu->clks, gmu->num_clks, "gmu_clk",
  1890. gmu->freqs[level]);
  1891. if (ret) {
  1892. dev_err(&gmu->pdev->dev, "GMU clock:%d set failed:%d\n",
  1893. gmu->freqs[level], ret);
  1894. return ret;
  1895. }
  1896. ret = kgsl_clk_set_rate(gmu->clks, gmu->num_clks, "hub_clk",
  1897. adreno_dev->gmu_hub_clk_freq);
  1898. if (ret && ret != -ENODEV) {
  1899. dev_err(&gmu->pdev->dev, "Unable to set the HUB clock\n");
  1900. return ret;
  1901. }
  1902. ret = clk_bulk_prepare_enable(gmu->num_clks, gmu->clks);
  1903. if (ret) {
  1904. dev_err(&gmu->pdev->dev, "Cannot enable GMU clocks\n");
  1905. return ret;
  1906. }
  1907. device->state = KGSL_STATE_AWARE;
  1908. return 0;
  1909. }
  1910. static void a6xx_gmu_force_first_boot(struct kgsl_device *device)
  1911. {
  1912. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1913. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1914. u32 val = 0;
  1915. if (gmu->pdc_cfg_base) {
  1916. kgsl_pwrctrl_enable_cx_gdsc(device);
  1917. a6xx_gmu_enable_clks(adreno_dev, 0);
  1918. val = __raw_readl(gmu->pdc_cfg_base + (PDC_GPU_ENABLE_PDC << 2));
  1919. /* ensure this read operation is done before the next one */
  1920. rmb();
  1921. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1922. a6xx_gmu_disable_gdsc(adreno_dev);
  1923. a6xx_rdpm_cx_freq_update(gmu, 0);
  1924. }
  1925. if (val != PDC_ENABLE_REG_VALUE) {
  1926. clear_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags);
  1927. clear_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags);
  1928. }
  1929. }
  1930. static int a6xx_gmu_first_boot(struct adreno_device *adreno_dev)
  1931. {
  1932. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1933. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1934. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  1935. int level, ret;
  1936. kgsl_pwrctrl_request_state(device, KGSL_STATE_AWARE);
  1937. a6xx_gmu_aop_send_acd_state(gmu, adreno_dev->acd_enabled);
  1938. ret = kgsl_pwrctrl_enable_cx_gdsc(device);
  1939. if (ret)
  1940. return ret;
  1941. ret = a6xx_gmu_enable_clks(adreno_dev, 0);
  1942. if (ret)
  1943. goto gdsc_off;
  1944. ret = a6xx_gmu_load_fw(adreno_dev);
  1945. if (ret)
  1946. goto clks_gdsc_off;
  1947. ret = a6xx_gmu_itcm_shadow(adreno_dev);
  1948. if (ret)
  1949. goto clks_gdsc_off;
  1950. a6xx_gmu_register_config(adreno_dev);
  1951. a6xx_gmu_version_info(adreno_dev);
  1952. a6xx_gmu_irq_enable(adreno_dev);
  1953. /* Vote for minimal DDR BW for GMU to init */
  1954. level = pwr->pwrlevels[pwr->default_pwrlevel].bus_min;
  1955. icc_set_bw(pwr->icc_path, 0, kBps_to_icc(pwr->ddr_table[level]));
  1956. /* Clear any GPU faults that might have been left over */
  1957. adreno_clear_gpu_fault(adreno_dev);
  1958. ret = a6xx_gmu_device_start(adreno_dev);
  1959. if (ret)
  1960. goto err;
  1961. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
  1962. ret = a6xx_gmu_gfx_rail_on(adreno_dev);
  1963. if (ret) {
  1964. a6xx_gmu_oob_clear(device, oob_boot_slumber);
  1965. goto err;
  1966. }
  1967. }
  1968. if (gmu->idle_level == GPU_HW_ACTIVE) {
  1969. ret = a6xx_gmu_sptprac_enable(adreno_dev);
  1970. if (ret)
  1971. goto err;
  1972. }
  1973. if (!test_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags)) {
  1974. ret = a6xx_load_pdc_ucode(adreno_dev);
  1975. if (ret)
  1976. goto err;
  1977. a6xx_load_rsc_ucode(adreno_dev);
  1978. set_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags);
  1979. }
  1980. ret = a6xx_gmu_hfi_start(adreno_dev);
  1981. if (ret)
  1982. goto err;
  1983. ret = a6xx_hfi_start(adreno_dev);
  1984. if (ret)
  1985. goto err;
  1986. icc_set_bw(pwr->icc_path, 0, 0);
  1987. device->gmu_fault = false;
  1988. kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);
  1989. return 0;
  1990. err:
  1991. a6xx_gmu_irq_disable(adreno_dev);
  1992. if (device->gmu_fault) {
  1993. a6xx_gmu_suspend(adreno_dev);
  1994. return ret;
  1995. }
  1996. clks_gdsc_off:
  1997. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1998. gdsc_off:
  1999. a6xx_gmu_disable_gdsc(adreno_dev);
  2000. a6xx_rdpm_cx_freq_update(gmu, 0);
  2001. return ret;
  2002. }
  2003. static int a6xx_gmu_boot(struct adreno_device *adreno_dev)
  2004. {
  2005. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2006. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2007. int ret = 0;
  2008. kgsl_pwrctrl_request_state(device, KGSL_STATE_AWARE);
  2009. ret = kgsl_pwrctrl_enable_cx_gdsc(device);
  2010. if (ret)
  2011. return ret;
  2012. ret = a6xx_gmu_enable_clks(adreno_dev, 0);
  2013. if (ret)
  2014. goto gdsc_off;
  2015. ret = a6xx_rscc_wakeup_sequence(adreno_dev);
  2016. if (ret)
  2017. goto clks_gdsc_off;
  2018. ret = a6xx_gmu_load_fw(adreno_dev);
  2019. if (ret)
  2020. goto clks_gdsc_off;
  2021. a6xx_gmu_register_config(adreno_dev);
  2022. a6xx_gmu_irq_enable(adreno_dev);
  2023. /* Clear any GPU faults that might have been left over */
  2024. adreno_clear_gpu_fault(adreno_dev);
  2025. ret = a6xx_gmu_device_start(adreno_dev);
  2026. if (ret)
  2027. goto err;
  2028. if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG)) {
  2029. ret = a6xx_gmu_gfx_rail_on(adreno_dev);
  2030. if (ret) {
  2031. a6xx_gmu_oob_clear(device, oob_boot_slumber);
  2032. goto err;
  2033. }
  2034. }
  2035. if (gmu->idle_level == GPU_HW_ACTIVE) {
  2036. ret = a6xx_gmu_sptprac_enable(adreno_dev);
  2037. if (ret)
  2038. goto err;
  2039. }
  2040. ret = a6xx_gmu_hfi_start(adreno_dev);
  2041. if (ret)
  2042. goto err;
  2043. ret = a6xx_hfi_start(adreno_dev);
  2044. if (ret)
  2045. goto err;
  2046. device->gmu_fault = false;
  2047. kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);
  2048. return 0;
  2049. err:
  2050. a6xx_gmu_irq_disable(adreno_dev);
  2051. if (device->gmu_fault) {
  2052. a6xx_gmu_suspend(adreno_dev);
  2053. return ret;
  2054. }
  2055. clks_gdsc_off:
  2056. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  2057. gdsc_off:
  2058. a6xx_gmu_disable_gdsc(adreno_dev);
  2059. a6xx_rdpm_cx_freq_update(gmu, 0);
  2060. return ret;
  2061. }
  2062. static void set_acd(struct adreno_device *adreno_dev, void *priv)
  2063. {
  2064. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2065. adreno_dev->acd_enabled = *((bool *)priv);
  2066. a6xx_gmu_aop_send_acd_state(gmu, adreno_dev->acd_enabled);
  2067. }
  2068. static int a6xx_gmu_acd_set(struct kgsl_device *device, bool val)
  2069. {
  2070. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2071. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2072. if (IS_ERR_OR_NULL(gmu->mailbox.channel))
  2073. return -EINVAL;
  2074. /* Don't do any unneeded work if ACD is already in the correct state */
  2075. if (adreno_dev->acd_enabled == val)
  2076. return 0;
  2077. /* Power cycle the GPU for changes to take effect */
  2078. return adreno_power_cycle(adreno_dev, set_acd, &val);
  2079. }
  2080. static void a6xx_send_tlb_hint(struct kgsl_device *device, bool val)
  2081. {
  2082. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2083. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2084. if (!gmu->domain)
  2085. return;
  2086. #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
  2087. qcom_skip_tlb_management(&gmu->pdev->dev, val);
  2088. #endif
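/* If TLB maintenance was being skipped, flush the GMU IOTLB now that the hint is cleared */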
  2089. if (!val)
  2090. iommu_flush_iotlb_all(gmu->domain);
  2091. }
  2092. static const struct gmu_dev_ops a6xx_gmudev = {
  2093. .oob_set = a6xx_gmu_oob_set,
  2094. .oob_clear = a6xx_gmu_oob_clear,
  2095. .ifpc_store = a6xx_gmu_ifpc_store,
  2096. .ifpc_isenabled = a6xx_gmu_ifpc_isenabled,
  2097. .cooperative_reset = a6xx_gmu_cooperative_reset,
  2098. .wait_for_active_transition = a6xx_gmu_wait_for_active_transition,
  2099. .scales_bandwidth = a6xx_gmu_scales_bandwidth,
  2100. .acd_set = a6xx_gmu_acd_set,
  2101. .force_first_boot = a6xx_gmu_force_first_boot,
  2102. .send_nmi = a6xx_gmu_send_nmi,
  2103. .send_tlb_hint = a6xx_send_tlb_hint,
  2104. };
  2105. static int a6xx_gmu_bus_set(struct adreno_device *adreno_dev, int buslevel,
  2106. u32 ab)
  2107. {
  2108. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2109. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  2110. int ret = 0;
  2111. kgsl_icc_set_tag(pwr, buslevel);
  2112. if (buslevel != pwr->cur_buslevel) {
  2113. ret = a6xx_gmu_dcvs_set(adreno_dev, INVALID_DCVS_IDX, buslevel);
  2114. if (ret)
  2115. return ret;
  2116. pwr->cur_buslevel = buslevel;
  2117. }
  2118. if (ab != pwr->cur_ab) {
  2119. icc_set_bw(pwr->icc_path, MBps_to_icc(ab), 0);
  2120. pwr->cur_ab = ab;
  2121. }
  2122. trace_kgsl_buslevel(device, pwr->active_pwrlevel, pwr->cur_buslevel, pwr->cur_ab);
  2123. return ret;
  2124. }
  2125. static void a6xx_free_gmu_globals(struct a6xx_gmu_device *gmu)
  2126. {
  2127. int i;
  2128. for (i = 0; i < gmu->global_entries && i < ARRAY_SIZE(gmu->gmu_globals); i++) {
  2129. struct kgsl_memdesc *md = &gmu->gmu_globals[i];
  2130. if (!md->gmuaddr)
  2131. continue;
  2132. iommu_unmap(gmu->domain, md->gmuaddr, md->size);
  2133. if (md->priv & KGSL_MEMDESC_SYSMEM)
  2134. kgsl_sharedmem_free(md);
  2135. memset(md, 0, sizeof(*md));
  2136. }
  2137. if (gmu->domain) {
  2138. iommu_detach_device(gmu->domain, &gmu->pdev->dev);
  2139. iommu_domain_free(gmu->domain);
  2140. gmu->domain = NULL;
  2141. }
  2142. gmu->global_entries = 0;
  2143. }
  2144. static int a6xx_gmu_aop_mailbox_init(struct adreno_device *adreno_dev,
  2145. struct a6xx_gmu_device *gmu)
  2146. {
  2147. struct kgsl_mailbox *mailbox = &gmu->mailbox;
  2148. mailbox->client.dev = &gmu->pdev->dev;
  2149. mailbox->client.tx_block = true;
  2150. mailbox->client.tx_tout = 1000;
  2151. mailbox->client.knows_txdone = false;
  2152. mailbox->channel = mbox_request_channel(&mailbox->client, 0);
  2153. if (IS_ERR(mailbox->channel))
  2154. return PTR_ERR(mailbox->channel);
  2155. adreno_dev->acd_enabled = true;
  2156. return 0;
  2157. }
  2158. static void a6xx_gmu_acd_probe(struct kgsl_device *device,
  2159. struct a6xx_gmu_device *gmu, struct device_node *node)
  2160. {
  2161. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2162. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  2163. struct kgsl_pwrlevel *pwrlevel =
  2164. &pwr->pwrlevels[pwr->num_pwrlevels - 1];
  2165. struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_table;
  2166. int ret, i, cmd_idx = 0;
  2167. if (!ADRENO_FEATURE(adreno_dev, ADRENO_ACD))
  2168. return;
  2169. cmd->hdr = CREATE_MSG_HDR(H2F_MSG_ACD_TBL, HFI_MSG_CMD);
  2170. cmd->version = 1;
  2171. cmd->stride = 1;
  2172. cmd->enable_by_level = 0;
  2173. /*
  2174. * Iterate through each gpu power level and generate a mask for GMU
  2175. * firmware for ACD enabled levels and store the corresponding control
  2176. * register configurations to the acd_table structure.
  2177. */
  2178. for (i = 0; i < pwr->num_pwrlevels; i++) {
  2179. if (pwrlevel->acd_level) {
  2180. cmd->enable_by_level |= (1 << (i + 1));
  2181. cmd->data[cmd_idx++] = pwrlevel->acd_level;
  2182. }
  2183. pwrlevel--;
  2184. }
  2185. if (!cmd->enable_by_level)
  2186. return;
  2187. cmd->num_levels = cmd_idx;
  2188. ret = a6xx_gmu_aop_mailbox_init(adreno_dev, gmu);
  2189. if (ret)
  2190. dev_err(&gmu->pdev->dev,
  2191. "AOP mailbox init failed: %d\n", ret);
  2192. }
  2193. static int a6xx_gmu_reg_probe(struct adreno_device *adreno_dev)
  2194. {
  2195. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2196. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2197. int ret;
  2198. ret = kgsl_regmap_add_region(&device->regmap, gmu->pdev,
  2199. "kgsl_gmu_reg", NULL, NULL);
  2200. if (ret)
  2201. dev_err(&gmu->pdev->dev, "Unable to map the GMU registers\n");
  2202. return ret;
  2203. }
  2204. static int a6xx_gmu_clk_probe(struct adreno_device *adreno_dev)
  2205. {
  2206. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2207. int ret, i;
  2208. int tbl_size;
  2209. int num_freqs;
  2210. int offset;
  2211. ret = devm_clk_bulk_get_all(&gmu->pdev->dev, &gmu->clks);
  2212. if (ret < 0)
  2213. return ret;
  2214. /*
  2215. * Voting for apb_pclk will enable power and clocks required for
  2216. * QDSS path to function. However, if QCOM_KGSL_QDSS_STM is not enabled,
  2217. * QDSS is essentially unusable. Hence, if QDSS cannot be used,
  2218. * don't vote for this clock.
  2219. */
  2220. if (!IS_ENABLED(CONFIG_QCOM_KGSL_QDSS_STM)) {
  2221. for (i = 0; i < ret; i++) {
  2222. if (!strcmp(gmu->clks[i].id, "apb_pclk")) {
  2223. gmu->clks[i].clk = NULL;
  2224. break;
  2225. }
  2226. }
  2227. }
  2228. gmu->num_clks = ret;
  2229. /* Read the optional list of GMU frequencies */
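/*
* Illustrative only (the exact values are board specific): the table is a
* flat list of <frequency-in-Hz voltage-level> pairs, e.g.
*	qcom,gmu-freq-table = <200000000 RPMH_REGULATOR_LEVEL_LOW_SVS
*			       500000000 RPMH_REGULATOR_LEVEL_SVS>;
*/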
  2230. if (of_get_property(gmu->pdev->dev.of_node,
  2231. "qcom,gmu-freq-table", &tbl_size) == NULL)
  2232. goto default_gmu_freq;
  2233. num_freqs = (tbl_size / sizeof(u32)) / 2;
  2234. if (num_freqs != ARRAY_SIZE(gmu->freqs))
  2235. goto default_gmu_freq;
  2236. for (i = 0; i < num_freqs; i++) {
  2237. offset = i * 2;
  2238. ret = of_property_read_u32_index(gmu->pdev->dev.of_node,
  2239. "qcom,gmu-freq-table", offset, &gmu->freqs[i]);
  2240. if (ret)
  2241. goto default_gmu_freq;
  2242. ret = of_property_read_u32_index(gmu->pdev->dev.of_node,
  2243. "qcom,gmu-freq-table", offset + 1, &gmu->vlvls[i]);
  2244. if (ret)
  2245. goto default_gmu_freq;
  2246. }
  2247. return 0;
  2248. default_gmu_freq:
  2249. /* The GMU frequency table is missing or invalid. Go with a default */
  2250. gmu->freqs[0] = GMU_FREQ_MIN;
  2251. gmu->vlvls[0] = RPMH_REGULATOR_LEVEL_MIN_SVS;
  2252. gmu->freqs[1] = GMU_FREQ_MAX;
  2253. gmu->vlvls[1] = RPMH_REGULATOR_LEVEL_SVS;
  2254. if (adreno_is_a660(adreno_dev))
  2255. gmu->vlvls[0] = RPMH_REGULATOR_LEVEL_LOW_SVS;
  2256. return 0;
  2257. }
  2258. static void a6xx_gmu_rdpm_probe(struct a6xx_gmu_device *gmu,
  2259. struct kgsl_device *device)
  2260. {
  2261. struct resource *res;
  2262. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
  2263. "rdpm_cx");
  2264. if (res)
  2265. gmu->rdpm_cx_virt = devm_ioremap(&device->pdev->dev,
  2266. res->start, resource_size(res));
  2267. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
  2268. "rdpm_mx");
  2269. if (res)
  2270. gmu->rdpm_mx_virt = devm_ioremap(&device->pdev->dev,
  2271. res->start, resource_size(res));
  2272. }
  2273. void a6xx_gmu_remove(struct kgsl_device *device)
  2274. {
  2275. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2276. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2277. if (!IS_ERR_OR_NULL(gmu->mailbox.channel))
  2278. mbox_free_channel(gmu->mailbox.channel);
  2279. adreno_dev->acd_enabled = false;
  2280. if (gmu->fw_image)
  2281. release_firmware(gmu->fw_image);
  2282. a6xx_free_gmu_globals(gmu);
  2283. vfree(gmu->itcm_shadow);
  2284. kobject_put(&gmu->log_kobj);
  2285. kobject_put(&gmu->stats_kobj);
  2286. }
  2287. static int a6xx_gmu_iommu_fault_handler(struct iommu_domain *domain,
  2288. struct device *dev, unsigned long addr, int flags, void *token)
  2289. {
  2290. char *fault_type = "unknown";
  2291. if (flags & IOMMU_FAULT_TRANSLATION)
  2292. fault_type = "translation";
  2293. else if (flags & IOMMU_FAULT_PERMISSION)
  2294. fault_type = "permission";
  2295. else if (flags & IOMMU_FAULT_EXTERNAL)
  2296. fault_type = "external";
  2297. else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
  2298. fault_type = "transaction stalled";
  2299. dev_err(dev, "GMU fault addr = %lX, context=kernel (%s %s fault)\n",
  2300. addr,
  2301. (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
  2302. fault_type);
  2303. return 0;
  2304. }
  2305. static int a6xx_gmu_iommu_init(struct a6xx_gmu_device *gmu)
  2306. {
  2307. int ret;
  2308. gmu->domain = iommu_domain_alloc(&platform_bus_type);
  2309. if (gmu->domain == NULL) {
  2310. dev_err(&gmu->pdev->dev, "Unable to allocate GMU IOMMU domain\n");
  2311. return -ENODEV;
  2312. }
  2313. /*
  2314. * Disable stall on fault for the GMU context bank.
  2315. * This sets SCTLR.CFCFG = 0.
* Also note that the smmu driver sets SCTLR.HUPCF = 0 by default.
  2317. */
  2318. qcom_iommu_set_fault_model(gmu->domain, QCOM_IOMMU_FAULT_MODEL_NO_STALL);
  2319. ret = iommu_attach_device(gmu->domain, &gmu->pdev->dev);
  2320. if (!ret) {
  2321. iommu_set_fault_handler(gmu->domain,
  2322. a6xx_gmu_iommu_fault_handler, gmu);
  2323. return 0;
  2324. }
  2325. dev_err(&gmu->pdev->dev,
  2326. "Unable to attach GMU IOMMU domain: %d\n", ret);
  2327. iommu_domain_free(gmu->domain);
  2328. gmu->domain = NULL;
  2329. return ret;
  2330. }
  2331. int a6xx_gmu_probe(struct kgsl_device *device,
  2332. struct platform_device *pdev)
  2333. {
  2334. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2335. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2336. struct device *dev = &pdev->dev;
  2337. struct resource *res;
  2338. int ret;
  2339. gmu->pdev = pdev;
  2340. dma_set_coherent_mask(&gmu->pdev->dev, DMA_BIT_MASK(64));
  2341. gmu->pdev->dev.dma_mask = &gmu->pdev->dev.coherent_dma_mask;
  2342. set_dma_ops(&gmu->pdev->dev, NULL);
  2343. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
  2344. "rscc");
  2345. if (res) {
  2346. gmu->rscc_virt = devm_ioremap(&device->pdev->dev, res->start,
  2347. resource_size(res));
  2348. if (gmu->rscc_virt == NULL) {
  2349. dev_err(&gmu->pdev->dev, "rscc ioremap failed\n");
  2350. return -ENOMEM;
  2351. }
  2352. }
  2353. /* Setup any rdpm register ranges */
  2354. a6xx_gmu_rdpm_probe(gmu, device);
  2355. /* Set up GMU regulators */
  2356. ret = kgsl_pwrctrl_probe_regulators(device, pdev);
  2357. if (ret)
  2358. return ret;
  2359. ret = a6xx_gmu_clk_probe(adreno_dev);
  2360. if (ret < 0)
  2361. return ret;
  2362. /* Set up GMU IOMMU and shared memory with GMU */
  2363. ret = a6xx_gmu_iommu_init(gmu);
  2364. if (ret)
  2365. goto error;
  2366. if (adreno_is_a650_family(adreno_dev))
  2367. gmu->vma = a6xx_gmu_vma;
  2368. else
  2369. gmu->vma = a6xx_gmu_vma_legacy;
/* Map and reserve the GMU CSR registers */
  2371. ret = a6xx_gmu_reg_probe(adreno_dev);
  2372. if (ret)
  2373. goto error;
  2374. /* Populates RPMh configurations */
  2375. ret = a6xx_build_rpmh_tables(adreno_dev);
  2376. if (ret)
  2377. goto error;
  2378. /* Set up GMU idle state */
  2379. if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) {
  2380. gmu->idle_level = GPU_HW_IFPC;
  2381. adreno_dev->ifpc_hyst = A6X_GMU_LONG_IFPC_HYST;
  2382. adreno_dev->ifpc_hyst_floor = A6X_GMU_LONG_IFPC_HYST_FLOOR;
  2383. } else {
  2384. gmu->idle_level = GPU_HW_ACTIVE;
  2385. }
  2386. a6xx_gmu_acd_probe(device, gmu, pdev->dev.of_node);
  2387. set_bit(GMU_ENABLED, &device->gmu_core.flags);
  2388. /* Initialize to zero to detect trace packet loss */
  2389. gmu->trace.seq_num = 0;
  2390. device->gmu_core.dev_ops = &a6xx_gmudev;
  2391. /* Set default GMU attributes */
  2392. gmu->log_stream_enable = false;
  2393. gmu->log_group_mask = 0x3;
  2394. /* Disabled by default */
  2395. gmu->stats_enable = false;
  2396. /* Set default to CM3 busy cycles countable */
  2397. gmu->stats_mask = BIT(A6XX_GMU_CM3_BUSY_CYCLES);
/* Interval is in 50 us units. Set the default sampling interval to 4 x 50 us */
  2399. gmu->stats_interval = HFI_FEATURE_GMU_STATS_INTERVAL;
  2400. /* GMU sysfs nodes setup */
  2401. (void) kobject_init_and_add(&gmu->log_kobj, &log_kobj_type, &dev->kobj, "log");
  2402. (void) kobject_init_and_add(&gmu->stats_kobj, &stats_kobj_type, &dev->kobj, "stats");
  2403. of_property_read_u32(gmu->pdev->dev.of_node, "qcom,gmu-perf-ddr-bw",
  2404. &gmu->perf_ddr_bw);
  2405. gmu->irq = kgsl_request_irq(gmu->pdev, "kgsl_gmu_irq",
  2406. a6xx_gmu_irq_handler, device);
  2407. if (gmu->irq >= 0)
  2408. return 0;
  2409. ret = gmu->irq;
  2410. error:
  2411. a6xx_gmu_remove(device);
  2412. return ret;
  2413. }
  2414. static void a6xx_gmu_active_count_put(struct adreno_device *adreno_dev)
  2415. {
  2416. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2417. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  2418. return;
  2419. if (WARN(atomic_read(&device->active_cnt) == 0,
  2420. "Unbalanced get/put calls to KGSL active count\n"))
  2421. return;
  2422. if (atomic_dec_and_test(&device->active_cnt)) {
  2423. kgsl_pwrscale_update_stats(device);
  2424. kgsl_pwrscale_update(device);
  2425. kgsl_start_idle_timer(device);
  2426. }
  2427. trace_kgsl_active_count(device,
  2428. (unsigned long) __builtin_return_address(0));
  2429. wake_up(&device->active_cnt_wq);
  2430. }
  2431. int a6xx_halt_gbif(struct adreno_device *adreno_dev)
  2432. {
  2433. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2434. int ret;
  2435. /* Halt new client requests */
  2436. kgsl_regwrite(device, A6XX_GBIF_HALT, A6XX_GBIF_CLIENT_HALT_MASK);
  2437. ret = adreno_wait_for_halt_ack(device,
  2438. A6XX_GBIF_HALT_ACK, A6XX_GBIF_CLIENT_HALT_MASK);
  2439. /* Halt all AXI requests */
  2440. kgsl_regwrite(device, A6XX_GBIF_HALT, A6XX_GBIF_ARB_HALT_MASK);
  2441. ret = adreno_wait_for_halt_ack(device,
  2442. A6XX_GBIF_HALT_ACK, A6XX_GBIF_ARB_HALT_MASK);
  2443. return ret;
  2444. }
  2445. #define RPMH_VOTE_TIMEOUT 2 /* ms */
  2446. static int a6xx_gmu_power_off(struct adreno_device *adreno_dev)
  2447. {
  2448. struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
  2449. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2450. int ret = 0;
  2451. if (device->gmu_fault)
  2452. goto error;
  2453. /* Wait for the lowest idle level we requested */
  2454. ret = a6xx_gmu_wait_for_lowest_idle(adreno_dev);
  2455. if (ret)
  2456. goto error;
  2457. ret = a6xx_complete_rpmh_votes(adreno_dev, RPMH_VOTE_TIMEOUT);
  2458. if (ret)
  2459. goto error;
  2460. ret = a6xx_gmu_notify_slumber(adreno_dev);
  2461. if (ret)
  2462. goto error;
  2463. ret = a6xx_gmu_wait_for_idle(adreno_dev);
  2464. if (ret)
  2465. goto error;
  2466. ret = a6xx_rscc_sleep_sequence(adreno_dev);
  2467. a6xx_rdpm_mx_freq_update(gmu, 0);
/* Now that we are done with the GMU and GPU, clear the GBIF */
  2469. if (!adreno_is_a630(adreno_dev)) {
  2470. ret = a6xx_halt_gbif(adreno_dev);
  2471. /* De-assert the halts */
  2472. kgsl_regwrite(device, A6XX_GBIF_HALT, 0x0);
  2473. }
  2474. a6xx_gmu_irq_disable(adreno_dev);
  2475. a6xx_hfi_stop(adreno_dev);
  2476. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  2477. a6xx_gmu_disable_gdsc(adreno_dev);
  2478. a6xx_rdpm_cx_freq_update(gmu, 0);
  2479. kgsl_pwrctrl_set_state(device, KGSL_STATE_NONE);
  2480. return ret;
  2481. error:
  2482. a6xx_gmu_irq_disable(adreno_dev);
  2483. a6xx_hfi_stop(adreno_dev);
  2484. a6xx_gmu_suspend(adreno_dev);
  2485. return ret;
  2486. }
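
/*
 * Happy-path overview (descriptive only): wait for the GMU to reach the
 * requested low power level, confirm the RPMh votes completed, ask the GMU
 * firmware to enter slumber, wait for it to idle, run the RSCC sleep
 * sequence, clear the GBIF halts on GBIF targets, then shut down GMU
 * interrupts, HFI, clocks and the GDSC, and drop the MX/CX RDPM votes.
 * Failures in the early steps (or a pending gmu_fault) take the error path,
 * which stops HFI and forces the GMU down via a6xx_gmu_suspend().
 */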

void a6xx_enable_gpu_irq(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	kgsl_pwrctrl_irq(device, true);

	adreno_irqctrl(adreno_dev, 1);
}

void a6xx_disable_gpu_irq(struct adreno_device *adreno_dev)
{
	kgsl_pwrctrl_irq(KGSL_DEVICE(adreno_dev), false);

	if (a6xx_gmu_gx_is_on(adreno_dev))
		adreno_irqctrl(adreno_dev, 0);
}

static void a6xx_fusa_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	void __iomem *fusa_virt = NULL;
	struct resource *res;

	if (!adreno_is_a663(adreno_dev))
		return;

	res = platform_get_resource_byname(device->pdev,
			IORESOURCE_MEM, "fusa");
	if (res)
		fusa_virt = ioremap(res->start, resource_size(res));

	if (!fusa_virt) {
		dev_err(device->dev, "Failed to map fusa\n");
		return;
	}

	/* Disable fusa mode in boot stage */
	_regrmw(fusa_virt, A6XX_GPU_FUSA_REG_ECC_CTRL - A6XX_GPU_FUSA_REG_BASE,
			A6XX_GPU_FUSA_DISABLE_MASK, A6XX_GPU_FUSA_DISABLE_BITS);
	_regrmw(fusa_virt, A6XX_GPU_FUSA_REG_CSR_PRIY - A6XX_GPU_FUSA_REG_BASE,
			A6XX_GPU_FUSA_DISABLE_MASK, A6XX_GPU_FUSA_DISABLE_BITS);

	iounmap(fusa_virt);
}
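
/*
 * The FUSA block is mapped standalone, so the writes above address it
 * relative to the start of the mapping: the absolute register address minus
 * A6XX_GPU_FUSA_REG_BASE gives the byte offset into fusa_virt. _regrmw() is
 * assumed here to be the file's read-modify-write accessor for raw __iomem
 * pointers (mask cleared, bits set).
 */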

static int a6xx_gpu_boot(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	adreno_set_active_ctxs_null(adreno_dev);

	ret = kgsl_mmu_start(device);
	if (ret)
		goto err;

	ret = a6xx_gmu_oob_set(device, oob_gpu);
	if (ret)
		goto oob_clear;

	ret = a6xx_gmu_hfi_start_msg(adreno_dev);
	if (ret)
		goto oob_clear;

	/* Clear the busy_data stats - we're starting over from scratch */
	memset(&adreno_dev->busy_data, 0, sizeof(adreno_dev->busy_data));

	/* Restore performance counter registers with saved values */
	adreno_perfcounter_restore(adreno_dev);

	a6xx_start(adreno_dev);

	/* Re-initialize the coresight registers if applicable */
	adreno_coresight_start(adreno_dev);

	adreno_perfcounter_start(adreno_dev);

	/* Clear FSR here in case it is set from a previous pagefault */
	kgsl_mmu_clear_fsr(&device->mmu);

	a6xx_enable_gpu_irq(adreno_dev);

	ret = a6xx_rb_start(adreno_dev);
	if (ret) {
		a6xx_disable_gpu_irq(adreno_dev);
		goto oob_clear;
	}

	/*
	 * At this point it is safe to assume that we recovered. Setting
	 * this field allows us to take a new snapshot for the next failure
	 * if we are prioritizing the first unrecoverable snapshot.
	 */
	if (device->snapshot)
		device->snapshot->recovered = true;

	/* Start the dispatcher */
	adreno_dispatcher_start(device);

	device->reset_counter++;

	a6xx_gmu_oob_clear(device, oob_gpu);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gmu_core_dev_oob_clear(device, oob_boot_slumber);

	return 0;

oob_clear:
	a6xx_gmu_oob_clear(device, oob_gpu);

	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_HFI_USE_REG))
		gmu_core_dev_oob_clear(device, oob_boot_slumber);

err:
	a6xx_gmu_power_off(adreno_dev);

	return ret;
}
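
/*
 * Both the success and error paths drop the oob_gpu vote (plus the
 * boot/slumber OOB on targets with the ADRENO_QUIRK_HFI_USE_REG quirk); the
 * error path additionally powers the GMU back off so a failed boot leaves
 * the hardware in a known state.
 */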

static void gmu_idle_timer(struct timer_list *t)
{
	struct kgsl_device *device = container_of(t, struct kgsl_device,
					idle_timer);

	kgsl_schedule_work(&device->idle_check_ws);
}
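
/*
 * The timer callback runs in atomic (softirq) context, so it cannot take
 * the device mutex or sleep; it only kicks idle_check_ws. The actual
 * SLUMBER decision is made in gmu_idle_check() below, in process context.
 */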

static int a6xx_boot(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	if (WARN_ON(test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags)))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	if (IS_ENABLED(CONFIG_QCOM_KGSL_HIBERNATION) &&
		!test_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags))
		ret = a6xx_gmu_first_boot(adreno_dev);
	else
		ret = a6xx_gmu_boot(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return ret;

	kgsl_start_idle_timer(device);
	kgsl_pwrscale_wake(device);

	set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

	return ret;
}

static int a6xx_first_boot(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	if (test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags)) {
		if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
			return a6xx_boot(adreno_dev);

		return 0;
	}

	KGSL_BOOT_MARKER("ADRENO Init");

	ret = a6xx_ringbuffer_init(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_microcode_read(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_init(adreno_dev);
	if (ret)
		return ret;

	ret = a6xx_gmu_init(adreno_dev);
	if (ret)
		return ret;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	ret = a6xx_gmu_first_boot(adreno_dev);
	if (ret)
		return ret;

	a6xx_fusa_init(adreno_dev);

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return ret;

	adreno_get_bus_counters(adreno_dev);

	adreno_dev->cooperative_reset = ADRENO_FEATURE(adreno_dev,
		ADRENO_COOP_RESET);

	adreno_create_profile_buffer(adreno_dev);

	set_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags);
	set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	/*
	 * BCL needs its Central Broadcast register to be programmed from TZ.
	 * This programming happens only when the zap shader firmware load is
	 * successful. Since the zap firmware load can fail in the boot up
	 * path, enable BCL only after the first boot completes successfully,
	 * which ensures the Central Broadcast register was programmed before
	 * BCL is enabled.
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_BCL))
		adreno_dev->bcl_enabled = true;

	/*
	 * There is a possible deadlock between kgsl firmware reading
	 * (request_firmware) and devfreq update calls. During first boot, the
	 * kgsl device mutex is held and then request_firmware is called to
	 * read firmware; request_firmware internally takes the dev_pm_qos_mtx
	 * lock. Devfreq update calls triggered by thermal/bcl or the devfreq
	 * sysfs take the same dev_pm_qos_mtx lock first and then try to take
	 * the kgsl device mutex as part of the get_dev_status/target calls.
	 * This results in a deadlock where neither thread can acquire the
	 * mutex held by the other. Enable devfreq updates only now that we
	 * are done reading all firmware files.
	 */
	device->pwrscale.devfreq_enabled = true;

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

	KGSL_BOOT_MARKER("ADRENO Ready");

	return 0;
}
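
/*
 * Lock ordering behind the devfreq_enabled gate above (illustrative):
 *
 *	first boot:    device->mutex  ->  dev_pm_qos_mtx  (request_firmware)
 *	devfreq path:  dev_pm_qos_mtx ->  device->mutex   (get_dev_status/target)
 *
 * Deferring devfreq updates until all firmware reads are done means the
 * devfreq path never contends for device->mutex while the boot path is
 * holding it across request_firmware().
 */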

static int a630_vbif_halt(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	kgsl_regwrite(device, A6XX_VBIF_XIN_HALT_CTRL0,
		A6XX_VBIF_XIN_HALT_CTRL0_MASK);
	ret = adreno_wait_for_halt_ack(device,
			A6XX_VBIF_XIN_HALT_CTRL1,
			A6XX_VBIF_XIN_HALT_CTRL0_MASK);
	kgsl_regwrite(device, A6XX_VBIF_XIN_HALT_CTRL0, 0);

	return ret;
}

static int a6xx_power_off(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	WARN_ON(!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags));

	adreno_suspend_context(device);

	/*
	 * adreno_suspend_context() unlocks the device mutex, which could
	 * allow a concurrent thread to attempt the SLUMBER sequence.
	 * Hence, check the flags again before proceeding with SLUMBER.
	 */
	if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);

	ret = a6xx_gmu_oob_set(device, oob_gpu);
	if (ret) {
		a6xx_gmu_oob_clear(device, oob_gpu);
		goto no_gx_power;
	}

	if (a6xx_irq_pending(adreno_dev)) {
		a6xx_gmu_oob_clear(device, oob_gpu);
		return -EBUSY;
	}

	kgsl_pwrscale_update_stats(device);

	/* Save active coresight registers if applicable */
	adreno_coresight_stop(adreno_dev);

	/* Save physical performance counter values before GPU power down */
	adreno_perfcounter_save(adreno_dev);

	/*
	 * Clear GX halt on non-GBIF targets. For targets with GBIF,
	 * GX halt is handled by the GMU FW.
	 */
	if (adreno_is_a630(adreno_dev))
		a630_vbif_halt(adreno_dev);

	adreno_irqctrl(adreno_dev, 0);

	a6xx_gmu_oob_clear(device, oob_gpu);

no_gx_power:
	kgsl_pwrctrl_irq(device, false);

	a6xx_gmu_power_off(adreno_dev);

	adreno_set_active_ctxs_null(adreno_dev);

	adreno_dispatcher_stop(adreno_dev);

	adreno_ringbuffer_stop(adreno_dev);

	adreno_llcc_slice_deactivate(adreno_dev);

	clear_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	del_timer_sync(&device->idle_timer);

	kgsl_pwrscale_sleep(device);

	kgsl_pwrctrl_clear_l3_vote(device);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);

	return ret;
}

static void gmu_idle_check(struct work_struct *work)
{
	struct kgsl_device *device = container_of(work,
					struct kgsl_device, idle_check_ws);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	mutex_lock(&device->mutex);

	if (test_bit(GMU_DISABLE_SLUMBER, &device->gmu_core.flags))
		goto done;

	if (atomic_read(&device->active_cnt) || time_is_after_jiffies(device->idle_jiffies)) {
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
		goto done;
	}

	if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		goto done;

	spin_lock(&device->submit_lock);

	if (device->submit_now) {
		spin_unlock(&device->submit_lock);
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
		goto done;
	}

	device->skip_inline_submit = true;
	spin_unlock(&device->submit_lock);

	ret = a6xx_power_off(adreno_dev);
	if (ret == -EBUSY) {
		kgsl_pwrscale_update(device);
		kgsl_start_idle_timer(device);
	}

done:
	mutex_unlock(&device->mutex);
}
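
/*
 * Note: a6xx_power_off() returns -EBUSY when a GPU interrupt is still
 * pending (see the a6xx_irq_pending() check above). In that case the idle
 * worker does not force SLUMBER; it simply re-arms the idle timer and tries
 * again on the next expiry.
 */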

static int a6xx_gmu_first_open(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int ret;

	/*
	 * Do the one time settings that need to happen when we
	 * attempt to boot the gpu the very first time
	 */
	ret = a6xx_first_boot(adreno_dev);
	if (ret)
		return ret;

	/*
	 * A client that does a first_open but never closes the device
	 * may prevent us from going back to SLUMBER. So trigger the idle
	 * check by incrementing the active count and immediately releasing it.
	 */
	atomic_inc(&device->active_cnt);
	a6xx_gmu_active_count_put(adreno_dev);

	return 0;
}

static int a6xx_gmu_last_close(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		return a6xx_power_off(adreno_dev);

	return 0;
}

static int a6xx_gmu_active_count_get(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret = 0;

	if (WARN_ON(!mutex_is_locked(&device->mutex)))
		return -EINVAL;

	if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags))
		return -EINVAL;

	if ((atomic_read(&device->active_cnt) == 0) &&
		!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		ret = a6xx_boot(adreno_dev);

	if (ret == 0)
		atomic_inc(&device->active_cnt);

	trace_kgsl_active_count(device,
		(unsigned long) __builtin_return_address(0));

	return ret;
}

static int a6xx_gmu_pm_suspend(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags))
		return 0;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);

	/* Halt any new submissions */
	reinit_completion(&device->halt_gate);

	/* Wait for the active count to drop so the device can be put in slumber */
	ret = kgsl_active_count_wait(device, 0, HZ);
	if (ret) {
		dev_err(device->dev,
			"Timed out waiting for the active count\n");
		goto err;
	}

	ret = adreno_idle(device);
	if (ret)
		goto err;

	if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		a6xx_power_off(adreno_dev);

	set_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags);

	adreno_get_gpu_halt(adreno_dev);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);

	return 0;

err:
	adreno_dispatcher_start(device);
	return ret;
}

static void a6xx_gmu_pm_resume(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	if (WARN(!test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags),
		"resume invoked without a suspend\n"))
		return;

	adreno_put_gpu_halt(adreno_dev);

	adreno_dispatcher_start(device);

	clear_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags);
}

static void a6xx_gmu_touch_wakeup(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	int ret;

	/*
	 * Do not wake up a suspended device, and do not wake up before the
	 * first boot sequence has completed.
	 */
	if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags) ||
		!test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags))
		return;

	if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
		goto done;

	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);

	ret = a6xx_gmu_boot(adreno_dev);
	if (ret)
		return;

	ret = a6xx_gpu_boot(adreno_dev);
	if (ret)
		return;

	kgsl_pwrscale_wake(device);

	set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	device->pwrctrl.last_stat_updated = ktime_get();

	kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

done:
	/*
	 * When waking up from a touch event we want to stay active long enough
	 * for the user to send a draw command. The default idle timer timeout
	 * is shorter than we want, so push the idle timer out further for this
	 * special case.
	 */
	mod_timer(&device->idle_timer, jiffies +
			msecs_to_jiffies(adreno_wake_timeout));
}

const struct adreno_power_ops a6xx_gmu_power_ops = {
	.first_open = a6xx_gmu_first_open,
	.last_close = a6xx_gmu_last_close,
	.active_count_get = a6xx_gmu_active_count_get,
	.active_count_put = a6xx_gmu_active_count_put,
	.pm_suspend = a6xx_gmu_pm_suspend,
	.pm_resume = a6xx_gmu_pm_resume,
	.touch_wakeup = a6xx_gmu_touch_wakeup,
	.gpu_clock_set = a6xx_gmu_clock_set,
	.gpu_bus_set = a6xx_gmu_bus_set,
};

const struct adreno_power_ops a630_gmu_power_ops = {
	.first_open = a6xx_gmu_first_open,
	.last_close = a6xx_gmu_last_close,
	.active_count_get = a6xx_gmu_active_count_get,
	.active_count_put = a6xx_gmu_active_count_put,
	.pm_suspend = a6xx_gmu_pm_suspend,
	.pm_resume = a6xx_gmu_pm_resume,
	.touch_wakeup = a6xx_gmu_touch_wakeup,
	.gpu_clock_set = a6xx_gmu_clock_set,
};
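
/*
 * The two power ops tables are identical except that a630 provides no
 * .gpu_bus_set hook; on that target, bus level changes are presumably
 * driven outside the GMU path rather than through a6xx_gmu_bus_set().
 */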

int a6xx_gmu_device_probe(struct platform_device *pdev,
	u32 chipid, const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	struct a6xx_device *a6xx_dev;
	int ret;

	a6xx_dev = devm_kzalloc(&pdev->dev, sizeof(*a6xx_dev),
			GFP_KERNEL);
	if (!a6xx_dev)
		return -ENOMEM;

	adreno_dev = &a6xx_dev->adreno_dev;

	adreno_dev->irq_mask = A6XX_INT_MASK;

	ret = a6xx_probe_common(pdev, adreno_dev, chipid, gpucore);
	if (ret)
		return ret;

	ret = adreno_dispatcher_init(adreno_dev);
	if (ret)
		return ret;

	device = KGSL_DEVICE(adreno_dev);

	INIT_WORK(&device->idle_check_ws, gmu_idle_check);

	timer_setup(&device->idle_timer, gmu_idle_timer, 0);

	return 0;
}

int a6xx_gmu_reset(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);

	a6xx_disable_gpu_irq(adreno_dev);

	a6xx_gmu_irq_disable(adreno_dev);

	a6xx_hfi_stop(adreno_dev);

	/* Hard reset the gmu and gpu */
	a6xx_gmu_suspend(adreno_dev);

	a6xx_reset_preempt_records(adreno_dev);

	adreno_llcc_slice_deactivate(adreno_dev);

	clear_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	/* Attempt to reboot the gmu and gpu */
	return a6xx_boot(adreno_dev);
}

int a6xx_gmu_hfi_probe(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hfi *hfi = &gmu->hfi;

	hfi->irq = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq",
		a6xx_hfi_irq_handler, KGSL_DEVICE(adreno_dev));

	return hfi->irq < 0 ? hfi->irq : 0;
}

int a6xx_gmu_add_to_minidump(struct adreno_device *adreno_dev)
{
	struct a6xx_device *a6xx_dev = container_of(adreno_dev,
			struct a6xx_device, adreno_dev);
	int ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_A6XX_DEVICE,
			(void *)(a6xx_dev), sizeof(struct a6xx_device));
	if (ret)
		return ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_GMU_LOG_ENTRY,
			a6xx_dev->gmu.gmu_log->hostptr, a6xx_dev->gmu.gmu_log->size);
	if (ret)
		return ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_HFIMEM_ENTRY,
			a6xx_dev->gmu.hfi.hfi_mem->hostptr, a6xx_dev->gmu.hfi.hfi_mem->size);
	if (ret)
		return ret;

	if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev))
		ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_GMU_DUMPMEM_ENTRY,
				a6xx_dev->gmu.dump_mem->hostptr, a6xx_dev->gmu.dump_mem->size);

	return ret;
}

static int a6xx_gmu_bind(struct device *dev, struct device *master, void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct a6xx_gpudev *a6xx_gpudev = to_a6xx_gpudev(gpudev);
	int ret;

	ret = a6xx_gmu_probe(device, to_platform_device(dev));
	if (ret)
		return ret;

	if (a6xx_gpudev->hfi_probe) {
		ret = a6xx_gpudev->hfi_probe(adreno_dev);
		if (ret) {
			a6xx_gmu_remove(device);
			return ret;
		}
	}

	return 0;
}

static void a6xx_gmu_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct a6xx_gpudev *a6xx_gpudev = to_a6xx_gpudev(gpudev);

	if (a6xx_gpudev->hfi_remove)
		a6xx_gpudev->hfi_remove(adreno_dev);

	a6xx_gmu_remove(device);
}

static const struct component_ops a6xx_gmu_component_ops = {
	.bind = a6xx_gmu_bind,
	.unbind = a6xx_gmu_unbind,
};
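
/*
 * The GMU is probed as a component of the aggregate KGSL device: probing
 * the "qcom,gpu-gmu" platform device below only registers it with the
 * component framework, and the real setup happens in a6xx_gmu_bind() once
 * the master KGSL device assembles all of its components. a6xx_gmu_unbind()
 * tears the GMU down again when the aggregate is destroyed.
 */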

static int a6xx_gmu_probe_dev(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &a6xx_gmu_component_ops);
}

static int a6xx_gmu_remove_dev(struct platform_device *pdev)
{
	component_del(&pdev->dev, &a6xx_gmu_component_ops);

	return 0;
}

static const struct of_device_id a6xx_gmu_match_table[] = {
	{ .compatible = "qcom,gpu-gmu" },
	{ },
};

struct platform_driver a6xx_gmu_driver = {
	.probe = a6xx_gmu_probe_dev,
	.remove = a6xx_gmu_remove_dev,
	.driver = {
		.name = "adreno-a6xx-gmu",
		.of_match_table = a6xx_gmu_match_table,
	},
};