adreno_gen7_gmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/firmware.h>
#include <linux/interconnect.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/of_platform.h>
#include <linux/qcom-iommu-util.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/mailbox/qmp.h>
#include <soc/qcom/cmd-db.h>

#include "adreno.h"
#include "adreno_gen7.h"
#include "adreno_trace.h"
#include "kgsl_bus.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_util.h"
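
/*
 * Static layout of the GMU virtual address space used by this file. The
 * ITCM/DTCM entries describe the fixed TCM windows; the remaining ranges
 * hold kernel-side GMU buffers. next_va is the bump pointer used by the
 * static mapping path (_map_gmu_static); GMU_NONCACHED_KERNEL is the only
 * range that uses the rbtree-based dynamic allocator (see vma_is_dynamic).
 */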
static struct gmu_vma_entry gen7_gmu_vma[] = {
	[GMU_ITCM] = {
		.start = 0x00000000,
		.size = SZ_16K,
	},
	[GMU_CACHE] = {
		.start = SZ_16K,
		.size = (SZ_16M - SZ_16K),
		.next_va = SZ_16K,
	},
	[GMU_DTCM] = {
		.start = SZ_256M + SZ_16K,
		.size = SZ_16K,
	},
	[GMU_DCACHE] = {
		.start = 0x0,
		.size = 0x0,
	},
	[GMU_NONCACHED_KERNEL] = {
		.start = 0x60000000,
		.size = SZ_512M,
		.next_va = 0x60000000,
	},
	[GMU_NONCACHED_KERNEL_EXTENDED] = {
		.start = 0xc0000000,
		.size = SZ_512M,
		.next_va = 0xc0000000,
	},
};
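
/*
 * sysfs knobs hanging off the GMU log and stats kobjects. The store
 * handlers only cache the requested value and mark the GPU for a cold
 * boot, so the new setting reaches GMU firmware on the next boot.
 */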
static ssize_t log_stream_enable_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, log_kobj);
	bool val;
	int ret;
	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;
	gmu->log_stream_enable = val;
	adreno_mark_for_coldboot(gen7_gmu_to_adreno(gmu));
	return count;
}

static ssize_t log_stream_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, log_kobj);
	return scnprintf(buf, PAGE_SIZE, "%d\n", gmu->log_stream_enable);
}

static ssize_t log_group_mask_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, log_kobj);
	u32 val;
	int ret;
	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;
	gmu->log_group_mask = val;
	adreno_mark_for_coldboot(gen7_gmu_to_adreno(gmu));
	return count;
}

static ssize_t log_group_mask_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, log_kobj);
	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->log_group_mask);
}

static struct kobj_attribute log_stream_enable_attr =
	__ATTR(log_stream_enable, 0644, log_stream_enable_show, log_stream_enable_store);

static struct kobj_attribute log_group_mask_attr =
	__ATTR(log_group_mask, 0644, log_group_mask_show, log_group_mask_store);

static struct attribute *log_attrs[] = {
	&log_stream_enable_attr.attr,
	&log_group_mask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(log);

static struct kobj_type log_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = log_groups,
};

static ssize_t stats_enable_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	bool val;
	int ret;
	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;
	gmu->stats_enable = val;
	adreno_mark_for_coldboot(gen7_gmu_to_adreno(gmu));
	return count;
}

static ssize_t stats_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	return scnprintf(buf, PAGE_SIZE, "%d\n", gmu->stats_enable);
}

static ssize_t stats_mask_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	u32 val;
	int ret;
	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;
	gmu->stats_mask = val;
	adreno_mark_for_coldboot(gen7_gmu_to_adreno(gmu));
	return count;
}

static ssize_t stats_mask_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->stats_mask);
}

static ssize_t stats_interval_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	u32 val;
	int ret;
	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;
	gmu->stats_interval = val;
	adreno_mark_for_coldboot(gen7_gmu_to_adreno(gmu));
	return count;
}

static ssize_t stats_interval_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct gen7_gmu_device *gmu = container_of(kobj, struct gen7_gmu_device, stats_kobj);
	return scnprintf(buf, PAGE_SIZE, "%x\n", gmu->stats_interval);
}

static struct kobj_attribute stats_enable_attr =
	__ATTR(stats_enable, 0644, stats_enable_show, stats_enable_store);

static struct kobj_attribute stats_mask_attr =
	__ATTR(stats_mask, 0644, stats_mask_show, stats_mask_store);

static struct kobj_attribute stats_interval_attr =
	__ATTR(stats_interval, 0644, stats_interval_show, stats_interval_store);

static struct attribute *stats_attrs[] = {
	&stats_enable_attr.attr,
	&stats_mask_attr.attr,
	&stats_interval_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(stats);

static struct kobj_type stats_kobj_type = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = stats_groups,
};

static int gen7_timed_poll_check_rscc(struct gen7_gmu_device *gmu,
		unsigned int offset, unsigned int expected_ret,
		unsigned int timeout, unsigned int mask)
{
	u32 value;
	return readl_poll_timeout(gmu->rscc_virt + (offset << 2), value,
		(value & mask) == expected_ret, 100, timeout * 1000);
}

struct gen7_gmu_device *to_gen7_gmu(struct adreno_device *adreno_dev)
{
	struct gen7_device *gen7_dev = container_of(adreno_dev,
			struct gen7_device, adreno_dev);
	return &gen7_dev->gmu;
}

struct adreno_device *gen7_gmu_to_adreno(struct gen7_gmu_device *gmu)
{
	struct gen7_device *gen7_dev =
		container_of(gmu, struct gen7_device, gmu);
	return &gen7_dev->adreno_dev;
}

#define RSC_CMD_OFFSET 2

static void _regwrite(void __iomem *regbase,
		unsigned int offsetwords, unsigned int value)
{
	void __iomem *reg;
	reg = regbase + (offsetwords << 2);
	__raw_writel(value, reg);
}

void gen7_load_rsc_ucode(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	void __iomem *rscc = gmu->rscc_virt;
	unsigned int seq_offset = GEN7_RSCC_SEQ_MEM_0_DRV0;
	/* Disable SDE clock gating */
	_regwrite(rscc, GEN7_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
	/* Setup RSC PDC handshake for sleep and wakeup */
	_regwrite(rscc, GEN7_RSCC_PDC_SLAVE_ID_DRV0, 1);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET, 0);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET, 0);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_DATA + RSC_CMD_OFFSET * 2,
			adreno_is_gen7_2_x_family(adreno_dev) ? 0x80000021 : 0x80000000);
	_regwrite(rscc, GEN7_RSCC_HIDDEN_TCS_CMD0_ADDR + RSC_CMD_OFFSET * 2, 0);
	_regwrite(rscc, GEN7_RSCC_OVERRIDE_START_ADDR, 0);
	_regwrite(rscc, GEN7_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	_regwrite(rscc, GEN7_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	_regwrite(rscc, GEN7_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
	if (adreno_is_gen7_2_x_family(adreno_dev))
		seq_offset = GEN7_2_0_RSCC_SEQ_MEM_0_DRV0;
	/* Load RSC sequencer uCode for sleep and wakeup */
	_regwrite(rscc, seq_offset, 0xeaaae5a0);
	_regwrite(rscc, seq_offset + 1, 0xe1a1ebab);
	_regwrite(rscc, seq_offset + 2, 0xa2e0a581);
	_regwrite(rscc, seq_offset + 3, 0xecac82e2);
	_regwrite(rscc, seq_offset + 4, 0x0020edad);
}

int gen7_load_pdc_ucode(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct resource *res_cfg;
	void __iomem *cfg = NULL;
	res_cfg = platform_get_resource_byname(gmu->pdev, IORESOURCE_MEM,
			"gmu_pdc");
	if (res_cfg)
		cfg = ioremap(res_cfg->start, resource_size(res_cfg));
	if (!cfg) {
		dev_err(&gmu->pdev->dev, "Failed to map PDC CFG\n");
		return -ENODEV;
	}
	/* Setup GPU PDC */
	_regwrite(cfg, GEN7_PDC_GPU_SEQ_START_ADDR, 0);
	_regwrite(cfg, GEN7_PDC_GPU_ENABLE_PDC, 0x80000001);
	iounmap(cfg);
	return 0;
}

/* Configure and enable GMU low power mode */
static void gen7_gmu_power_config(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	/* Disable GMU WB/RB buffer and caches at boot */
	gmu_core_regwrite(device, GEN7_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_core_regwrite(device, GEN7_GMU_ICACHE_CONFIG, 0x1);
	gmu_core_regwrite(device, GEN7_GMU_DCACHE_CONFIG, 0x1);
}

static void gmu_ao_sync_event(struct adreno_device *adreno_dev)
{
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	unsigned long flags;
	u64 ticks;
	/*
	 * Get the GMU always on ticks and log it in a trace message. This
	 * will be used to map GMU ticks to ftrace time. Do this in atomic
	 * context to ensure nothing happens between reading the always
	 * on ticks and doing the trace.
	 */
	local_irq_save(flags);
	ticks = gpudev->read_alwayson(adreno_dev);
	trace_gmu_ao_sync(ticks);
	local_irq_restore(flags);
}

int gen7_gmu_device_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	gmu_core_reset_trace_header(&gmu->trace);
	gmu_ao_sync_event(adreno_dev);
	/* Bring GMU out of reset */
	gmu_core_regwrite(device, GEN7_GMU_CM3_SYSRESET, 0);
	/* Make sure the write is posted before moving ahead */
	wmb();
	if (gmu_core_timed_poll_check(device, GEN7_GMU_CM3_FW_INIT_RESULT,
			BIT(8), 100, GENMASK(8, 0))) {
		dev_err(&gmu->pdev->dev, "GMU failed to come out of reset\n");
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * gen7_gmu_hfi_start() - Write registers and start HFI.
 * @device: Pointer to KGSL device
 */
int gen7_gmu_hfi_start(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	gmu_core_regwrite(device, GEN7_GMU_HFI_CTRL_INIT, 1);
	if (gmu_core_timed_poll_check(device, GEN7_GMU_HFI_CTRL_STATUS,
			BIT(0), 100, BIT(0))) {
		dev_err(&gmu->pdev->dev, "GMU HFI init failed\n");
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}
	return 0;
}

int gen7_rscc_wakeup_sequence(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct device *dev = &gmu->pdev->dev;
	/* Skip wakeup sequence if we didn't do the sleep sequence */
	if (!test_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags))
		return 0;
	/* RSC wake sequence */
	gmu_core_regwrite(device, GEN7_GMU_RSCC_CONTROL_REQ, BIT(1));
	/* Write request before polling */
	wmb();
	if (gmu_core_timed_poll_check(device, GEN7_GMU_RSCC_CONTROL_ACK,
			BIT(1), 100, BIT(1))) {
		dev_err(dev, "Failed to do GPU RSC power on\n");
		return -ETIMEDOUT;
	}
	if (gen7_timed_poll_check_rscc(gmu, GEN7_RSCC_SEQ_BUSY_DRV0,
			0x0, 100, UINT_MAX)) {
		dev_err(dev, "GPU RSC sequence stuck in waking up GPU\n");
		return -ETIMEDOUT;
	}
	gmu_core_regwrite(device, GEN7_GMU_RSCC_CONTROL_REQ, 0);
	clear_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags);
	return 0;
}

int gen7_rscc_sleep_sequence(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	int ret;
	if (!test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags))
		return 0;
	if (test_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags))
		return 0;
	gmu_core_regwrite(device, GEN7_GMU_CM3_SYSRESET, 1);
	/* Make sure M3 is in reset before going on */
	wmb();
	gmu_core_regread(device, GEN7_GMU_GENERAL_9, &gmu->log_wptr_retention);
	gmu_core_regwrite(device, GEN7_GMU_RSCC_CONTROL_REQ, BIT(0));
	/* Make sure the request completes before continuing */
	wmb();
	ret = gen7_timed_poll_check_rscc(gmu, GEN7_GPU_RSCC_RSC_STATUS0_DRV0,
			BIT(16), 100, BIT(16));
	if (ret) {
		dev_err(&gmu->pdev->dev, "GPU RSC power off fail\n");
		return -ETIMEDOUT;
	}
	gmu_core_regwrite(device, GEN7_GMU_RSCC_CONTROL_REQ, 0);
	set_bit(GMU_PRIV_RSCC_SLEEP_DONE, &gmu->flags);
	return 0;
}

static struct kgsl_memdesc *find_gmu_memdesc(struct gen7_gmu_device *gmu,
		u32 addr, u32 size)
{
	int i;
	for (i = 0; i < gmu->global_entries; i++) {
		struct kgsl_memdesc *md = &gmu->gmu_globals[i];
		if ((addr >= md->gmuaddr) &&
				(((addr + size) <= (md->gmuaddr + md->size))))
			return md;
	}
	return NULL;
}

static int find_vma_block(struct gen7_gmu_device *gmu, u32 addr, u32 size)
{
	int i;
	for (i = 0; i < GMU_MEM_TYPE_MAX; i++) {
		struct gmu_vma_entry *vma = &gmu->vma[i];
		if ((addr >= vma->start) &&
				((addr + size) <= (vma->start + vma->size)))
			return i;
	}
	return -ENOENT;
}

static void load_tcm(struct adreno_device *adreno_dev, const u8 *src,
		u32 tcm_start, u32 base, const struct gmu_block_header *blk)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 tcm_offset = tcm_start + ((blk->addr - base) / sizeof(u32));
	kgsl_regmap_bulk_write(&device->regmap, tcm_offset, src,
			blk->size >> 2);
}
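
/*
 * The GMU firmware image is a stream of gmu_block_header entries, each
 * followed by its payload. ITCM/DTCM blocks are written into the TCMs via
 * the register map; any other block is copied into the preallocated GMU
 * buffer backing its address range, and only on first boot.
 */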
int gen7_gmu_load_fw(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	const u8 *fw = (const u8 *)gmu->fw_image->data;
	while (fw < gmu->fw_image->data + gmu->fw_image->size) {
		const struct gmu_block_header *blk =
			(const struct gmu_block_header *)fw;
		int id;
		fw += sizeof(*blk);
		/* Don't deal with zero size blocks */
		if (blk->size == 0)
			continue;
		id = find_vma_block(gmu, blk->addr, blk->size);
		if (id < 0) {
			dev_err(&gmu->pdev->dev,
				"Unknown block in GMU FW addr:0x%x size:0x%x\n",
				blk->addr, blk->size);
			return -EINVAL;
		}
		if (id == GMU_ITCM) {
			load_tcm(adreno_dev, fw,
				GEN7_GMU_CM3_ITCM_START,
				gmu->vma[GMU_ITCM].start, blk);
		} else if (id == GMU_DTCM) {
			load_tcm(adreno_dev, fw,
				GEN7_GMU_CM3_DTCM_START,
				gmu->vma[GMU_DTCM].start, blk);
		} else {
			/* The firmware block for memory needs to be copied on first boot only */
			if (!test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags)) {
				struct kgsl_memdesc *md =
					find_gmu_memdesc(gmu, blk->addr, blk->size);
				if (!md) {
					dev_err(&gmu->pdev->dev,
						"No backing memory for GMU FW block addr:0x%x size:0x%x\n",
						blk->addr, blk->size);
					return -EINVAL;
				}
				memcpy(md->hostptr + (blk->addr - md->gmuaddr), fw,
					blk->size);
			}
		}
		fw += blk->size;
	}
	/* Proceed only after the FW is written */
	wmb();
	return 0;
}

static const char *oob_to_str(enum oob_request req)
{
	switch (req) {
	case oob_gpu:
		return "oob_gpu";
	case oob_perfcntr:
		return "oob_perfcntr";
	case oob_boot_slumber:
		return "oob_boot_slumber";
	case oob_dcvs:
		return "oob_dcvs";
	default:
		return "unknown";
	}
}

static void trigger_reset_recovery(struct adreno_device *adreno_dev,
		enum oob_request req)
{
	/*
	 * Trigger recovery for perfcounter oob only since only
	 * perfcounter oob can happen alongside an actively rendering gpu.
	 */
	if (req != oob_perfcntr)
		return;
	if (adreno_dev->dispatch_ops && adreno_dev->dispatch_ops->fault)
		adreno_dev->dispatch_ops->fault(adreno_dev,
			ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
}
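
/*
 * Out-of-band (OOB) requests are raised by setting a request-specific bit
 * in HOST2GMU_INTR_SET and polling for the matching acknowledge bit in
 * GMU2HOST_INTR_INFO (set = BIT(30 - req * 2), ack = BIT(31 - req)).
 * Requests at or above oob_boot_slumber are rejected on this path.
 */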
int gen7_gmu_oob_set(struct kgsl_device *device,
		enum oob_request req)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	int ret = 0;
	int set, check;
	if (req == oob_perfcntr && gmu->num_oob_perfcntr++)
		return 0;
	if (req >= oob_boot_slumber) {
		dev_err(&gmu->pdev->dev,
			"Unsupported OOB request %s\n",
			oob_to_str(req));
		return -EINVAL;
	}
	set = BIT(30 - req * 2);
	check = BIT(31 - req);
	gmu_core_regwrite(device, GEN7_GMU_HOST2GMU_INTR_SET, set);
	if (gmu_core_timed_poll_check(device, GEN7_GMU_GMU2HOST_INTR_INFO, check,
			100, check)) {
		if (req == oob_perfcntr)
			gmu->num_oob_perfcntr--;
		gmu_core_fault_snapshot(device);
		ret = -ETIMEDOUT;
		WARN(1, "OOB request %s timed out\n", oob_to_str(req));
		trigger_reset_recovery(adreno_dev, req);
	}
	gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_CLR, check);
	trace_kgsl_gmu_oob_set(set);
	return ret;
}

void gen7_gmu_oob_clear(struct kgsl_device *device,
		enum oob_request req)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	int clear = BIT(31 - req * 2);
	if (req == oob_perfcntr && --gmu->num_oob_perfcntr)
		return;
	if (req >= oob_boot_slumber) {
		dev_err(&gmu->pdev->dev, "Unsupported OOB clear %s\n",
			oob_to_str(req));
		return;
	}
	gmu_core_regwrite(device, GEN7_GMU_HOST2GMU_INTR_SET, clear);
	trace_kgsl_gmu_oob_clear(clear);
}

void gen7_gmu_irq_enable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct gen7_hfi *hfi = &gmu->hfi;
	/* Clear pending IRQs and Unmask needed IRQs */
	gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_CLR, UINT_MAX);
	gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_CLR, UINT_MAX);
	gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_MASK,
		(unsigned int)~HFI_IRQ_MASK);
	gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_MASK,
		(unsigned int)~GMU_AO_INT_MASK);
	/* Enable all IRQs on host */
	enable_irq(hfi->irq);
	enable_irq(gmu->irq);
}

void gen7_gmu_irq_disable(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct gen7_hfi *hfi = &gmu->hfi;
	/* Disable all IRQs on host */
	disable_irq(gmu->irq);
	disable_irq(hfi->irq);
	/* Mask all IRQs and clear pending IRQs */
	gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_MASK, UINT_MAX);
	gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_MASK, UINT_MAX);
	gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_CLR, UINT_MAX);
	gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_CLR, UINT_MAX);
}

static int gen7_gmu_hfi_start_msg(struct adreno_device *adreno_dev)
{
	struct hfi_start_cmd req;
	int ret;
	ret = CMD_MSG_HDR(req, H2F_MSG_START);
	if (ret)
		return ret;
	return gen7_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
}

static u32 gen7_rscc_tcsm_drv0_status_reglist[] = {
	GEN7_RSCC_TCS0_DRV0_STATUS,
	GEN7_RSCC_TCS1_DRV0_STATUS,
	GEN7_RSCC_TCS2_DRV0_STATUS,
	GEN7_RSCC_TCS3_DRV0_STATUS,
	GEN7_RSCC_TCS4_DRV0_STATUS,
	GEN7_RSCC_TCS5_DRV0_STATUS,
	GEN7_RSCC_TCS6_DRV0_STATUS,
	GEN7_RSCC_TCS7_DRV0_STATUS,
	GEN7_RSCC_TCS8_DRV0_STATUS,
	GEN7_RSCC_TCS9_DRV0_STATUS,
};

static u32 gen7_2_0_rscc_tcsm_drv0_status_reglist[] = {
	GEN7_2_0_RSCC_TCS0_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS1_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS2_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS3_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS4_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS5_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS6_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS7_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS8_DRV0_STATUS,
	GEN7_2_0_RSCC_TCS9_DRV0_STATUS,
};
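
/*
 * Poll bit 0 of every RPMh TCS DRV0 status register; a timeout on any of
 * them means a vote is still outstanding and is reported as an error.
 */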
static int gen7_complete_rpmh_votes(struct gen7_gmu_device *gmu,
		u32 timeout)
{
	struct adreno_device *adreno_dev = gen7_gmu_to_adreno(gmu);
	int i, ret = 0;
	if (adreno_is_gen7_2_x_family(adreno_dev)) {
		for (i = 0; i < ARRAY_SIZE(gen7_2_0_rscc_tcsm_drv0_status_reglist); i++)
			ret |= gen7_timed_poll_check_rscc(gmu,
				gen7_2_0_rscc_tcsm_drv0_status_reglist[i], BIT(0), timeout,
				BIT(0));
	} else {
		for (i = 0; i < ARRAY_SIZE(gen7_rscc_tcsm_drv0_status_reglist); i++)
			ret |= gen7_timed_poll_check_rscc(gmu,
				gen7_rscc_tcsm_drv0_status_reglist[i], BIT(0), timeout,
				BIT(0));
	}
	if (ret)
		dev_err(&gmu->pdev->dev, "RPMH votes timedout: %d\n", ret);
	return ret;
}

#define GX_GDSC_POWER_OFF	BIT(0)
#define GX_CLK_OFF		BIT(1)
#define is_on(val)		(!(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)))

bool gen7_gmu_gx_is_on(struct adreno_device *adreno_dev)
{
	unsigned int val;
	gmu_core_regread(KGSL_DEVICE(adreno_dev),
			GEN7_GMU_GFX_PWR_CLK_STATUS, &val);
	return is_on(val);
}

static const char *idle_level_name(int level)
{
	if (level == GPU_HW_ACTIVE)
		return "GPU_HW_ACTIVE";
	else if (level == GPU_HW_IFPC)
		return "GPU_HW_IFPC";
	return "(Unknown)";
}

int gen7_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	u32 reg, reg1, reg2, reg3, reg4;
	unsigned long t;
	u64 ts1, ts2;
	ts1 = gpudev->read_alwayson(adreno_dev);
	t = jiffies + msecs_to_jiffies(100);
	do {
		gmu_core_regread(device,
			GEN7_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
		gmu_core_regread(device, GEN7_GMU_GFX_PWR_CLK_STATUS, &reg1);
		/*
		 * Check that we are at lowest level. If lowest level is IFPC
		 * double check that GFX clock is off.
		 */
		if (gmu->idle_level == reg)
			if (!(gmu->idle_level == GPU_HW_IFPC && is_on(reg1)))
				return 0;
		/* Wait 100us to reduce unnecessary AHB bus traffic */
		usleep_range(10, 100);
	} while (!time_after(jiffies, t));
	/* Check one last time */
	gmu_core_regread(device, GEN7_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
	gmu_core_regread(device, GEN7_GMU_GFX_PWR_CLK_STATUS, &reg1);
	/*
	 * Check that we are at lowest level. If lowest level is IFPC
	 * double check that GFX clock is off.
	 */
	if (gmu->idle_level == reg)
		if (!(gmu->idle_level == GPU_HW_IFPC && is_on(reg1)))
			return 0;
	ts2 = gpudev->read_alwayson(adreno_dev);
	/* Collect abort data to help with debugging */
	gmu_core_regread(device, GEN7_GPU_GMU_AO_GPU_CX_BUSY_STATUS, &reg2);
	gmu_core_regread(device, GEN7_GMU_RBBM_INT_UNMASKED_STATUS, &reg3);
	gmu_core_regread(device, GEN7_GMU_GMU_PWR_COL_KEEPALIVE, &reg4);
	dev_err(&gmu->pdev->dev,
		"----------------------[ GMU error ]----------------------\n");
	dev_err(&gmu->pdev->dev, "Timeout waiting for lowest idle level %s\n",
		idle_level_name(gmu->idle_level));
	dev_err(&gmu->pdev->dev, "Start: %llx (absolute ticks)\n", ts1);
	dev_err(&gmu->pdev->dev, "Poll: %llx (ticks relative to start)\n", ts2 - ts1);
	dev_err(&gmu->pdev->dev, "RPMH_POWER_STATE=%x GFX_PWR_CLK_STATUS=%x\n", reg, reg1);
	dev_err(&gmu->pdev->dev, "CX_BUSY_STATUS=%x\n", reg2);
	dev_err(&gmu->pdev->dev, "RBBM_INT_UNMASKED_STATUS=%x PWR_COL_KEEPALIVE=%x\n", reg3, reg4);
	/* Access GX registers only when GX is ON */
	if (is_on(reg1)) {
		kgsl_regread(device, GEN7_CP_STATUS_1, &reg2);
		kgsl_regread(device, GEN7_CP_CP2GMU_STATUS, &reg3);
		kgsl_regread(device, GEN7_CP_CONTEXT_SWITCH_CNTL, &reg4);
		dev_err(&gmu->pdev->dev, "GEN7_CP_STATUS_1=%x\n", reg2);
		dev_err(&gmu->pdev->dev, "CP2GMU_STATUS=%x CONTEXT_SWITCH_CNTL=%x\n", reg3, reg4);
	}
	WARN_ON(1);
	gmu_core_fault_snapshot(device);
	return -ETIMEDOUT;
}

/* Bitmask for GPU idle status check */
#define CXGXCPUBUSYIGNAHB	BIT(30)

int gen7_gmu_wait_for_idle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	u32 status2;
	u64 ts1;
	ts1 = gpudev->read_alwayson(adreno_dev);
	if (gmu_core_timed_poll_check(device, GEN7_GPU_GMU_AO_GPU_CX_BUSY_STATUS,
			0, 100, CXGXCPUBUSYIGNAHB)) {
		gmu_core_regread(device,
			GEN7_GPU_GMU_AO_GPU_CX_BUSY_STATUS2, &status2);
		dev_err(&gmu->pdev->dev,
			"GMU not idling: status2=0x%x %llx %llx\n",
			status2, ts1,
			gpudev->read_alwayson(adreno_dev));
		gmu_core_fault_snapshot(device);
		return -ETIMEDOUT;
	}
	return 0;
}

int gen7_gmu_version_info(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	const struct adreno_gen7_core *gen7_core = to_gen7_core(adreno_dev);
	/* GMU version info is at a fixed offset in the DTCM */
	gmu_core_regread(device, GEN7_GMU_CM3_DTCM_START + 0xff8,
		&gmu->ver.core);
	gmu_core_regread(device, GEN7_GMU_CM3_DTCM_START + 0xff9,
		&gmu->ver.core_dev);
	gmu_core_regread(device, GEN7_GMU_CM3_DTCM_START + 0xffa,
		&gmu->ver.pwr);
	gmu_core_regread(device, GEN7_GMU_CM3_DTCM_START + 0xffb,
		&gmu->ver.pwr_dev);
	gmu_core_regread(device, GEN7_GMU_CM3_DTCM_START + 0xffc,
		&gmu->ver.hfi);
	/* Check if gmu fw version on device is compatible with kgsl driver */
	if (gmu->ver.core < gen7_core->gmu_fw_version) {
		dev_err_once(&gmu->pdev->dev,
			"GMU FW version 0x%x error (expected 0x%x)\n",
			gmu->ver.core, gen7_core->gmu_fw_version);
		return -EINVAL;
	}
	return 0;
}

int gen7_gmu_itcm_shadow(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	u32 i, *dest;
	if (gmu->itcm_shadow)
		return 0;
	gmu->itcm_shadow = vzalloc(gmu->vma[GMU_ITCM].size);
	if (!gmu->itcm_shadow)
		return -ENOMEM;
	dest = (u32 *)gmu->itcm_shadow;
	for (i = 0; i < (gmu->vma[GMU_ITCM].size >> 2); i++)
		gmu_core_regread(KGSL_DEVICE(adreno_dev),
			GEN7_GMU_CM3_ITCM_START + i, dest++);
	return 0;
}

void gen7_gmu_register_config(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 val;
	/* Clear any previously set cm3 fault */
	atomic_set(&gmu->cm3_fault, 0);
	/* Vote veto for FAL10 */
	gmu_core_regwrite(device, GEN7_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
	gmu_core_regwrite(device, GEN7_GPU_GMU_CX_GMU_CX_FAL_INTF, 0x1);
	/* Clear init result to make sure we are getting fresh value */
	gmu_core_regwrite(device, GEN7_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_core_regwrite(device, GEN7_GMU_CM3_BOOT_CONFIG, 0x2);
	gmu_core_regwrite(device, GEN7_GMU_HFI_QTBL_ADDR,
		gmu->hfi.hfi_mem->gmuaddr);
	gmu_core_regwrite(device, GEN7_GMU_HFI_QTBL_INFO, 1);
	gmu_core_regwrite(device, GEN7_GMU_AHB_FENCE_RANGE_0, BIT(31) |
		FIELD_PREP(GENMASK(30, 18), 0x32) |
		FIELD_PREP(GENMASK(17, 0), 0x8a0));
	/*
	 * Make sure that CM3 state is at reset value. Snapshot is changing
	 * NMI bit and if we boot up GMU with NMI bit set GMU will boot
	 * straight in to NMI handler without executing __main code
	 */
	gmu_core_regwrite(device, GEN7_GMU_CM3_CFG, 0x4052);
	/*
	 * We may have asserted gbif halt as part of reset sequence which may
	 * not get cleared if the gdsc was not reset. So clear it before
	 * attempting GMU boot.
	 */
	kgsl_regwrite(device, GEN7_GBIF_HALT, 0x0);
	/* Set vrb address before starting GMU */
	if (!IS_ERR_OR_NULL(gmu->vrb))
		gmu_core_regwrite(device, GEN7_GMU_GENERAL_11, gmu->vrb->gmuaddr);
	/* Set the log wptr index */
	gmu_core_regwrite(device, GEN7_GMU_GENERAL_9,
		gmu->log_wptr_retention);
	/* Pass chipid to GMU FW, must happen before starting GMU */
	gmu_core_regwrite(device, GEN7_GMU_GENERAL_10,
		ADRENO_GMU_REV(ADRENO_GPUREV(adreno_dev)));
	/* Log size is encoded in (number of 4K units - 1) */
	val = (gmu->gmu_log->gmuaddr & GENMASK(31, 12)) |
		((GMU_LOG_SIZE / SZ_4K - 1) & GENMASK(7, 0));
	gmu_core_regwrite(device, GEN7_GMU_GENERAL_8, val);
	/* Configure power control and bring the GMU out of reset */
	gen7_gmu_power_config(adreno_dev);
	/*
	 * Enable BCL throttling -
	 * XOCLK1: countable: 0x13 (25% throttle)
	 * XOCLK2: countable: 0x17 (58% throttle)
	 * XOCLK3: countable: 0x19 (75% throttle)
	 * POWER_CONTROL_SELECT_0 controls counters 0 - 3, each selector
	 * is 8 bits wide.
	 */
	if (adreno_dev->bcl_enabled)
		gmu_core_regrmw(device, GEN7_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
			0xffffff00, FIELD_PREP(GENMASK(31, 24), 0x19) |
			FIELD_PREP(GENMASK(23, 16), 0x17) |
			FIELD_PREP(GENMASK(15, 8), 0x13));
}
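
/*
 * Dynamic GMU VA ranges track their mappings in a per-VMA red-black tree
 * of gmu_vma_node entries. find_va() returns the node overlapping
 * [addr, addr + size), or NULL if the range is free.
 */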
static struct gmu_vma_node *find_va(struct gmu_vma_entry *vma, u32 addr, u32 size)
{
	struct rb_node *node = vma->vma_root.rb_node;
	while (node != NULL) {
		struct gmu_vma_node *data = rb_entry(node, struct gmu_vma_node, node);
		if (addr + size <= data->va)
			node = node->rb_left;
		else if (addr >= data->va + data->size)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

/* Return true if VMA supports dynamic allocations */
static bool vma_is_dynamic(int vma_id)
{
	/* Dynamic allocations are done in the GMU_NONCACHED_KERNEL space */
	return vma_id == GMU_NONCACHED_KERNEL;
}

static int insert_va(struct gmu_vma_entry *vma, u32 addr, u32 size)
{
	struct rb_node **node, *parent = NULL;
	struct gmu_vma_node *new = kzalloc(sizeof(*new), GFP_NOWAIT);
	if (new == NULL)
		return -ENOMEM;
	new->va = addr;
	new->size = size;
	node = &vma->vma_root.rb_node;
	while (*node != NULL) {
		struct gmu_vma_node *this;
		parent = *node;
		this = rb_entry(parent, struct gmu_vma_node, node);
		if (addr + size <= this->va)
			node = &parent->rb_left;
		else if (addr >= this->va + this->size)
			node = &parent->rb_right;
		else {
			kfree(new);
			return -EEXIST;
		}
	}
	/* Add new node and rebalance tree */
	rb_link_node(&new->node, parent, node);
	rb_insert_color(&new->node, &vma->vma_root);
	return 0;
}

static u32 find_unmapped_va(struct gmu_vma_entry *vma, u32 size, u32 va_align)
{
	struct rb_node *node = rb_first(&vma->vma_root);
	u32 cur = vma->start;
	bool found = false;
	cur = ALIGN(cur, va_align);
	while (node) {
		struct gmu_vma_node *data = rb_entry(node, struct gmu_vma_node, node);
		if (cur + size <= data->va) {
			found = true;
			break;
		}
		cur = ALIGN(data->va + data->size, va_align);
		node = rb_next(node);
	}
	/* Do we have space after the last node? */
	if (!found && (cur + size <= vma->start + vma->size))
		found = true;
	return found ? cur : 0;
}
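
/*
 * Dynamic mappings happen in two steps: reserve the VA range in the rbtree
 * under the VMA spinlock, then map the memdesc into the GMU IOMMU domain.
 * If the IOMMU mapping fails, the freshly inserted VA node is removed so
 * the range can be reused.
 */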
  872. static int _map_gmu_dynamic(struct gen7_gmu_device *gmu,
  873. struct kgsl_memdesc *md,
  874. u32 addr, u32 vma_id, int attrs, u32 align)
  875. {
  876. int ret;
  877. struct gmu_vma_entry *vma = &gmu->vma[vma_id];
  878. struct gmu_vma_node *vma_node = NULL;
  879. u32 size = ALIGN(md->size, hfi_get_gmu_sz_alignment(align));
  880. spin_lock(&vma->lock);
  881. if (!addr) {
  882. /*
  883. * We will end up with a hole (GMU VA range not backed by physical mapping) if
  884. * the aligned size is greater than the size of the physical mapping
  885. */
  886. addr = find_unmapped_va(vma, size, hfi_get_gmu_va_alignment(align));
  887. if (addr == 0) {
  888. spin_unlock(&vma->lock);
  889. dev_err(&gmu->pdev->dev,
  890. "Insufficient VA space size: %x\n", size);
  891. return -ENOMEM;
  892. }
  893. }
  894. ret = insert_va(vma, addr, size);
  895. spin_unlock(&vma->lock);
  896. if (ret < 0) {
  897. dev_err(&gmu->pdev->dev,
  898. "Could not insert va: %x size %x\n", addr, size);
  899. return ret;
  900. }
  901. ret = gmu_core_map_memdesc(gmu->domain, md, addr, attrs);
  902. if (!ret) {
  903. md->gmuaddr = addr;
  904. return 0;
  905. }
  906. /* Failed to map to GMU */
  907. dev_err(&gmu->pdev->dev,
  908. "Unable to map GMU kernel block: addr:0x%08x size:0x%llx :%d\n",
  909. addr, md->size, ret);
  910. spin_lock(&vma->lock);
  911. vma_node = find_va(vma, md->gmuaddr, size);
  912. if (vma_node)
  913. rb_erase(&vma_node->node, &vma->vma_root);
  914. spin_unlock(&vma->lock);
  915. kfree(vma_node);
  916. return ret;
  917. }
  918. static int _map_gmu_static(struct gen7_gmu_device *gmu,
  919. struct kgsl_memdesc *md,
  920. u32 addr, u32 vma_id, int attrs, u32 align)
  921. {
  922. int ret;
  923. struct gmu_vma_entry *vma = &gmu->vma[vma_id];
  924. u32 size = ALIGN(md->size, hfi_get_gmu_sz_alignment(align));
  925. if (!addr)
  926. addr = ALIGN(vma->next_va, hfi_get_gmu_va_alignment(align));
  927. ret = gmu_core_map_memdesc(gmu->domain, md, addr, attrs);
  928. if (ret) {
  929. dev_err(&gmu->pdev->dev,
  930. "Unable to map GMU kernel block: addr:0x%08x size:0x%llx :%d\n",
  931. addr, md->size, ret);
  932. return ret;
  933. }
  934. md->gmuaddr = addr;
  935. /*
  936. * We will end up with a hole (GMU VA range not backed by physical mapping) if the aligned
  937. * size is greater than the size of the physical mapping
  938. */
  939. vma->next_va = md->gmuaddr + size;
  940. return 0;
  941. }
  942. static int _map_gmu(struct gen7_gmu_device *gmu,
  943. struct kgsl_memdesc *md,
  944. u32 addr, u32 vma_id, int attrs, u32 align)
  945. {
  946. return vma_is_dynamic(vma_id) ?
  947. _map_gmu_dynamic(gmu, md, addr, vma_id, attrs, align) :
  948. _map_gmu_static(gmu, md, addr, vma_id, attrs, align);
  949. }
  950. int gen7_gmu_import_buffer(struct gen7_gmu_device *gmu, u32 vma_id,
  951. struct kgsl_memdesc *md, u32 attrs, u32 align)
  952. {
  953. return _map_gmu(gmu, md, 0, vma_id, attrs, align);
  954. }
  955. struct kgsl_memdesc *gen7_reserve_gmu_kernel_block(struct gen7_gmu_device *gmu,
  956. u32 addr, u32 size, u32 vma_id, u32 align)
  957. {
  958. int ret;
  959. struct kgsl_memdesc *md;
  960. struct kgsl_device *device = KGSL_DEVICE(gen7_gmu_to_adreno(gmu));
  961. int attrs = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
  962. if (gmu->global_entries == ARRAY_SIZE(gmu->gmu_globals))
  963. return ERR_PTR(-ENOMEM);
  964. md = &gmu->gmu_globals[gmu->global_entries];
  965. ret = kgsl_allocate_kernel(device, md, size, 0, KGSL_MEMDESC_SYSMEM);
  966. if (ret) {
  967. memset(md, 0x0, sizeof(*md));
  968. return ERR_PTR(-ENOMEM);
  969. }
  970. ret = _map_gmu(gmu, md, addr, vma_id, attrs, align);
  971. if (ret) {
  972. kgsl_sharedmem_free(md);
  973. memset(md, 0x0, sizeof(*md));
  974. return ERR_PTR(ret);
  975. }
  976. gmu->global_entries++;
  977. return md;
  978. }
  979. struct kgsl_memdesc *gen7_reserve_gmu_kernel_block_fixed(struct gen7_gmu_device *gmu,
  980. u32 addr, u32 size, u32 vma_id, const char *resource, int attrs, u32 align)
  981. {
  982. int ret;
  983. struct kgsl_memdesc *md;
  984. struct kgsl_device *device = KGSL_DEVICE(gen7_gmu_to_adreno(gmu));
  985. if (gmu->global_entries == ARRAY_SIZE(gmu->gmu_globals))
  986. return ERR_PTR(-ENOMEM);
  987. md = &gmu->gmu_globals[gmu->global_entries];
  988. ret = kgsl_memdesc_init_fixed(device, gmu->pdev, resource, md);
  989. if (ret)
  990. return ERR_PTR(ret);
  991. ret = _map_gmu(gmu, md, addr, vma_id, attrs, align);
  992. sg_free_table(md->sgt);
  993. kfree(md->sgt);
  994. md->sgt = NULL;
  995. if (!ret)
  996. gmu->global_entries++;
  997. else {
  998. dev_err(&gmu->pdev->dev,
  999. "Unable to map GMU kernel block: addr:0x%08x size:0x%llx :%d\n",
  1000. addr, md->size, ret);
  1001. memset(md, 0x0, sizeof(*md));
  1002. md = ERR_PTR(ret);
  1003. }
  1004. return md;
  1005. }
  1006. int gen7_alloc_gmu_kernel_block(struct gen7_gmu_device *gmu,
  1007. struct kgsl_memdesc *md, u32 size, u32 vma_id, int attrs)
  1008. {
  1009. int ret;
  1010. struct kgsl_device *device = KGSL_DEVICE(gen7_gmu_to_adreno(gmu));
  1011. ret = kgsl_allocate_kernel(device, md, size, 0, KGSL_MEMDESC_SYSMEM);
  1012. if (ret)
  1013. return ret;
  1014. ret = _map_gmu(gmu, md, 0, vma_id, attrs, 0);
  1015. if (ret)
  1016. kgsl_sharedmem_free(md);
  1017. return ret;
  1018. }
  1019. void gen7_free_gmu_block(struct gen7_gmu_device *gmu, struct kgsl_memdesc *md)
  1020. {
  1021. int vma_id = find_vma_block(gmu, md->gmuaddr, md->size);
  1022. struct gmu_vma_entry *vma;
  1023. struct gmu_vma_node *vma_node;
  1024. if ((vma_id < 0) || !vma_is_dynamic(vma_id))
  1025. return;
  1026. vma = &gmu->vma[vma_id];
  1027. /*
  1028. * Do not remove the vma node if we failed to unmap the entire buffer. This is because the
  1029. * iommu driver considers remapping an already mapped iova as fatal.
  1030. */
  1031. if (md->size != iommu_unmap(gmu->domain, md->gmuaddr, md->size))
  1032. goto free;
  1033. spin_lock(&vma->lock);
  1034. vma_node = find_va(vma, md->gmuaddr, md->size);
  1035. if (vma_node)
  1036. rb_erase(&vma_node->node, &vma->vma_root);
  1037. spin_unlock(&vma->lock);
  1038. kfree(vma_node);
  1039. free:
  1040. kgsl_sharedmem_free(md);
  1041. }
  1042. static int gen7_gmu_process_prealloc(struct gen7_gmu_device *gmu,
  1043. struct gmu_block_header *blk)
  1044. {
  1045. struct kgsl_memdesc *md;
  1046. int id = find_vma_block(gmu, blk->addr, blk->value);
  1047. if (id < 0) {
  1048. dev_err(&gmu->pdev->dev,
  1049. "Invalid prealloc block addr: 0x%x value:%d\n",
  1050. blk->addr, blk->value);
  1051. return id;
  1052. }
  1053. /* Nothing to do for TCM blocks or user uncached */
  1054. if (id == GMU_ITCM || id == GMU_DTCM || id == GMU_NONCACHED_USER)
  1055. return 0;
  1056. /* Check if the block is already allocated */
  1057. md = find_gmu_memdesc(gmu, blk->addr, blk->value);
  1058. if (md != NULL)
  1059. return 0;
  1060. md = gen7_reserve_gmu_kernel_block(gmu, blk->addr, blk->value, id, 0);
  1061. return PTR_ERR_OR_ZERO(md);
  1062. }
  1063. int gen7_gmu_parse_fw(struct adreno_device *adreno_dev)
  1064. {
  1065. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1066. const struct adreno_gen7_core *gen7_core = to_gen7_core(adreno_dev);
  1067. struct gmu_block_header *blk;
  1068. int ret, offset = 0;
  1069. const char *gmufw_name = gen7_core->gmufw_name;
  1070. /*
  1071. * If GMU fw already saved and verified, do nothing new.
  1072. * Skip only request_firmware and allow preallocation to
  1073. * ensure in scenario where GMU request firmware succeeded
  1074. * but preallocation fails, we don't return early without
  1075. * successful preallocations on next open call.
  1076. */
  1077. if (!gmu->fw_image) {
  1078. if (gen7_core->gmufw_name == NULL)
  1079. return -EINVAL;
  1080. ret = request_firmware(&gmu->fw_image, gmufw_name,
  1081. &gmu->pdev->dev);
  1082. if (ret) {
  1083. if (gen7_core->gmufw_bak_name) {
  1084. gmufw_name = gen7_core->gmufw_bak_name;
  1085. ret = request_firmware(&gmu->fw_image, gmufw_name,
  1086. &gmu->pdev->dev);
  1087. }
  1088. if (ret) {
  1089. dev_err(&gmu->pdev->dev,
  1090. "request_firmware (%s) failed: %d\n",
  1091. gmufw_name, ret);
  1092. return ret;
  1093. }
  1094. }
  1095. }
  1096. /*
  1097. * Zero payload fw blocks contain metadata and are
  1098. * guaranteed to precede fw load data. Parse the
  1099. * metadata blocks.
  1100. */
  1101. while (offset < gmu->fw_image->size) {
  1102. blk = (struct gmu_block_header *)&gmu->fw_image->data[offset];
  1103. if (offset + sizeof(*blk) > gmu->fw_image->size) {
  1104. dev_err(&gmu->pdev->dev, "Invalid FW Block\n");
  1105. return -EINVAL;
  1106. }
  1107. /* Done with zero length blocks so return */
  1108. if (blk->size)
  1109. break;
  1110. offset += sizeof(*blk);
  1111. if (blk->type == GMU_BLK_TYPE_PREALLOC_REQ ||
  1112. blk->type == GMU_BLK_TYPE_PREALLOC_PERSIST_REQ) {
  1113. ret = gen7_gmu_process_prealloc(gmu, blk);
  1114. if (ret)
  1115. return ret;
  1116. }
  1117. }
  1118. return 0;
  1119. }
  1120. int gen7_gmu_memory_init(struct adreno_device *adreno_dev)
  1121. {
  1122. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1123. /* GMU master log */
  1124. if (IS_ERR_OR_NULL(gmu->gmu_log))
  1125. gmu->gmu_log = gen7_reserve_gmu_kernel_block(gmu, 0,
  1126. GMU_LOG_SIZE, GMU_NONCACHED_KERNEL, 0);
  1127. return PTR_ERR_OR_ZERO(gmu->gmu_log);
  1128. }
  1129. static int gen7_gmu_init(struct adreno_device *adreno_dev)
  1130. {
  1131. int ret;
  1132. ret = gen7_gmu_parse_fw(adreno_dev);
  1133. if (ret)
  1134. return ret;
  1135. ret = gen7_gmu_memory_init(adreno_dev);
  1136. if (ret)
  1137. return ret;
  1138. return gen7_hfi_init(adreno_dev);
  1139. }
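/*
 * Write a GBIF halt request and poll the ack register for up to 100ms,
 * resuming any stalled SMMU along the way so the halt can complete.
 */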
  1140. static void _do_gbif_halt(struct kgsl_device *device, u32 reg, u32 ack_reg,
  1141. u32 mask, const char *client)
  1142. {
  1143. u32 ack;
  1144. unsigned long t;
  1145. kgsl_regwrite(device, reg, mask);
  1146. t = jiffies + msecs_to_jiffies(100);
  1147. do {
  1148. kgsl_regread(device, ack_reg, &ack);
  1149. if ((ack & mask) == mask)
  1150. return;
/*
 * If we are attempting recovery after a stall-on-fault, the halt
 * sequence will not complete as long as the SMMU is stalled.
 */
  1156. kgsl_mmu_pagefault_resume(&device->mmu, false);
  1157. usleep_range(10, 100);
  1158. } while (!time_after(jiffies, t));
  1159. /* Check one last time */
  1160. kgsl_mmu_pagefault_resume(&device->mmu, false);
  1161. kgsl_regread(device, ack_reg, &ack);
  1162. if ((ack & mask) == mask)
  1163. return;
  1164. dev_err(device->dev, "%s GBIF halt timed out\n", client);
  1165. }
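/*
 * Force the GMU and GPU into a safe powered-down state: reset the CM3,
 * halt GX and CX GBIF traffic, soft-reset the GPU and, if needed, cycle
 * the GX GDSC from the CPU so it is left off.
 */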
  1166. static void gen7_gmu_pwrctrl_suspend(struct adreno_device *adreno_dev)
  1167. {
  1168. int ret = 0;
  1169. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1170. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1171. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
/* Disconnecting the GPU from the bus is not needed if the CX GDSC goes off later */
  1173. /*
  1174. * GEMNOC can enter power collapse state during GPU power down sequence.
  1175. * This could abort CX GDSC collapse. Assert Qactive to avoid this.
  1176. */
  1177. gmu_core_regwrite(device, GEN7_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
  1178. /* Check no outstanding RPMh voting */
  1179. gen7_complete_rpmh_votes(gmu, 1);
  1180. /* Clear the WRITEDROPPED fields and set fence to allow mode */
  1181. gmu_core_regwrite(device, GEN7_GMU_AHB_FENCE_STATUS_CLR, 0x7);
  1182. gmu_core_regwrite(device, GEN7_GMU_AO_AHB_FENCE_CTRL, 0);
  1183. /* Make sure above writes are committed before we proceed to recovery */
  1184. wmb();
  1185. gmu_core_regwrite(device, GEN7_GMU_CM3_SYSRESET, 1);
  1186. /* Halt GX traffic */
  1187. if (gen7_gmu_gx_is_on(adreno_dev))
  1188. _do_gbif_halt(device, GEN7_RBBM_GBIF_HALT,
  1189. GEN7_RBBM_GBIF_HALT_ACK,
  1190. GEN7_GBIF_GX_HALT_MASK,
  1191. "GX");
  1192. /* Halt CX traffic */
  1193. _do_gbif_halt(device, GEN7_GBIF_HALT, GEN7_GBIF_HALT_ACK,
  1194. GEN7_GBIF_ARB_HALT_MASK, "CX");
  1195. if (gen7_gmu_gx_is_on(adreno_dev))
  1196. kgsl_regwrite(device, GEN7_RBBM_SW_RESET_CMD, 0x1);
  1197. /* Make sure above writes are posted before turning off power resources */
  1198. wmb();
  1199. /* Allow the software reset to complete */
  1200. udelay(100);
  1201. /*
  1202. * This is based on the assumption that GMU is the only one controlling
  1203. * the GX HS. This code path is the only client voting for GX through
  1204. * the regulator interface.
  1205. */
  1206. if (pwr->gx_gdsc) {
  1207. if (gen7_gmu_gx_is_on(adreno_dev)) {
/*
 * Switch GX GDSC control from the GMU to the CPU: force a
 * non-zero reference count in the clk driver so that the
 * next disable call turns off the GDSC.
 */
  1213. ret = regulator_enable(pwr->gx_gdsc);
  1214. if (ret)
  1215. dev_err(&gmu->pdev->dev,
  1216. "suspend fail: gx enable %d\n", ret);
  1217. ret = regulator_disable(pwr->gx_gdsc);
  1218. if (ret)
  1219. dev_err(&gmu->pdev->dev,
  1220. "suspend fail: gx disable %d\n", ret);
  1221. if (gen7_gmu_gx_is_on(adreno_dev))
  1222. dev_err(&gmu->pdev->dev,
  1223. "gx is stuck on\n");
  1224. }
  1225. }
  1226. }
/*
 * gen7_gmu_notify_slumber() - initiate request to GMU to prepare to slumber
 * @adreno_dev: Pointer to the Adreno device
 */
  1231. static int gen7_gmu_notify_slumber(struct adreno_device *adreno_dev)
  1232. {
  1233. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1234. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1235. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1236. int bus_level = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;
  1237. int perf_idx = gmu->dcvs_table.gpu_level_num -
  1238. pwr->default_pwrlevel - 1;
  1239. struct hfi_prep_slumber_cmd req = {
  1240. .freq = perf_idx,
  1241. .bw = bus_level,
  1242. };
  1243. int ret;
  1244. req.bw |= gen7_bus_ab_quantize(adreno_dev, 0);
  1245. /* Disable the power counter so that the GMU is not busy */
  1246. gmu_core_regwrite(device, GEN7_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
  1247. ret = CMD_MSG_HDR(req, H2F_MSG_PREPARE_SLUMBER);
  1248. if (ret)
  1249. return ret;
  1250. ret = gen7_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
  1251. /* Make sure the fence is in ALLOW mode */
  1252. gmu_core_regwrite(device, GEN7_GMU_AO_AHB_FENCE_CTRL, 0);
  1253. /*
  1254. * GEMNOC can enter power collapse state during GPU power down sequence.
  1255. * This could abort CX GDSC collapse. Assert Qactive to avoid this.
  1256. */
  1257. gmu_core_regwrite(device, GEN7_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 0x1);
  1258. return ret;
  1259. }
  1260. void gen7_gmu_suspend(struct adreno_device *adreno_dev)
  1261. {
  1262. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1263. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1264. gen7_gmu_pwrctrl_suspend(adreno_dev);
  1265. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1266. kgsl_pwrctrl_disable_cx_gdsc(device);
  1267. gen7_rdpm_cx_freq_update(gmu, 0);
  1268. dev_err(&gmu->pdev->dev, "Suspended GMU\n");
  1269. kgsl_pwrctrl_set_state(device, KGSL_STATE_NONE);
  1270. }
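/*
 * Send a GX clock and/or bus bandwidth vote to the GMU over HFI; on failure
 * alongside an active GPU, trigger dispatcher-based reset and recovery.
 */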
  1271. static int gen7_gmu_dcvs_set(struct adreno_device *adreno_dev,
  1272. int gpu_pwrlevel, int bus_level, u32 ab)
  1273. {
  1274. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1275. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1276. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1277. struct gen7_dcvs_table *table = &gmu->dcvs_table;
  1278. struct hfi_gx_bw_perf_vote_cmd req = {
  1279. .ack_type = DCVS_ACK_BLOCK,
  1280. .freq = INVALID_DCVS_IDX,
  1281. .bw = INVALID_DCVS_IDX,
  1282. };
  1283. int ret = 0;
  1284. if (!test_bit(GMU_PRIV_HFI_STARTED, &gmu->flags))
  1285. return 0;
/* Do not send the GMU a GPU clock vote at the XO level or lower */
  1287. if ((gpu_pwrlevel != INVALID_DCVS_IDX) &&
  1288. (gpu_pwrlevel >= table->gpu_level_num - 1))
  1289. return -EINVAL;
  1290. if (gpu_pwrlevel < table->gpu_level_num - 1)
  1291. req.freq = table->gpu_level_num - gpu_pwrlevel - 1;
  1292. if (bus_level < pwr->ddr_table_count && bus_level > 0)
  1293. req.bw = bus_level;
  1294. req.bw |= gen7_bus_ab_quantize(adreno_dev, ab);
  1295. /* GMU will vote for slumber levels through the sleep sequence */
  1296. if ((req.freq == INVALID_DCVS_IDX) && (req.bw == INVALID_BW_VOTE))
  1297. return 0;
  1298. ret = CMD_MSG_HDR(req, H2F_MSG_GX_BW_PERF_VOTE);
  1299. if (ret)
  1300. return ret;
  1301. ret = gen7_hfi_send_generic_req(adreno_dev, &req, sizeof(req));
  1302. if (ret) {
  1303. dev_err_ratelimited(&gmu->pdev->dev,
  1304. "Failed to set GPU perf idx %u, bw idx %u\n",
  1305. req.freq, req.bw);
/*
 * If this was a DCVS request alongside an active GPU, request
 * dispatcher-based reset and recovery.
 */
  1310. if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  1311. adreno_dispatcher_fault(adreno_dev, ADRENO_GMU_FAULT |
  1312. ADRENO_GMU_FAULT_SKIP_SNAPSHOT);
  1313. }
  1314. if (req.freq != INVALID_DCVS_IDX)
  1315. gen7_rdpm_mx_freq_update(gmu,
  1316. gmu->dcvs_table.gx_votes[req.freq].freq);
  1317. return ret;
  1318. }
  1319. static int gen7_gmu_clock_set(struct adreno_device *adreno_dev, u32 pwrlevel)
  1320. {
  1321. return gen7_gmu_dcvs_set(adreno_dev, pwrlevel, INVALID_DCVS_IDX, INVALID_AB_VALUE);
  1322. }
  1323. static int gen7_gmu_ifpc_store(struct kgsl_device *device,
  1324. unsigned int val)
  1325. {
  1326. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1327. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1328. unsigned int requested_idle_level;
  1329. if (!ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
  1330. return -EINVAL;
  1331. if (val)
  1332. requested_idle_level = GPU_HW_IFPC;
  1333. else
  1334. requested_idle_level = GPU_HW_ACTIVE;
  1335. if (gmu->idle_level == requested_idle_level)
  1336. return 0;
  1337. /* Power down the GPU before changing the idle level */
  1338. return adreno_power_cycle_u32(adreno_dev, &gmu->idle_level,
  1339. requested_idle_level);
  1340. }
  1341. static unsigned int gen7_gmu_ifpc_isenabled(struct kgsl_device *device)
  1342. {
  1343. struct gen7_gmu_device *gmu = to_gen7_gmu(ADRENO_DEVICE(device));
  1344. return gmu->idle_level == GPU_HW_IFPC;
  1345. }
  1346. /* Send an NMI to the GMU */
  1347. void gen7_gmu_send_nmi(struct kgsl_device *device, bool force)
  1348. {
  1349. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1350. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1351. u32 result;
  1352. /*
  1353. * Do not send NMI if the SMMU is stalled because GMU will not be able
  1354. * to save cm3 state to DDR.
  1355. */
  1356. if (gen7_gmu_gx_is_on(adreno_dev) && adreno_smmu_is_stalled(adreno_dev)) {
  1357. dev_err(&gmu->pdev->dev,
  1358. "Skipping NMI because SMMU is stalled\n");
  1359. return;
  1360. }
  1361. if (force)
  1362. goto nmi;
/*
 * We should not send an NMI if a CM3 fault was reported, because we
 * don't want to overwrite the critical CM3 state the GMU captured before
 * it sent the CM3 fault interrupt. Also don't send an NMI if a GMU reset
 * is already active: we could have hit a GMU assert, and an NMI might
 * already have been triggered.
 */
  1370. /* make sure we're reading the latest cm3_fault */
  1371. smp_rmb();
  1372. if (atomic_read(&gmu->cm3_fault))
  1373. return;
  1374. gmu_core_regread(device, GEN7_GMU_CM3_FW_INIT_RESULT, &result);
  1375. if (result & 0xE00)
  1376. return;
  1377. nmi:
  1378. /* Mask so there's no interrupt caused by NMI */
  1379. gmu_core_regwrite(device, GEN7_GMU_GMU2HOST_INTR_MASK, UINT_MAX);
  1380. /* Make sure the interrupt is masked before causing it */
  1381. wmb();
/* This will cause the GMU to save its internal state to DDR */
  1383. gmu_core_regrmw(device, GEN7_GMU_CM3_CFG, BIT(9), BIT(9));
/* Make sure the NMI is invoked before we proceed */
  1385. wmb();
  1386. /* Wait for the NMI to be handled */
  1387. udelay(200);
  1388. }
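/*
 * Ask the GMU to die gracefully and wait for its snapshot-ready indication;
 * if that times out, fall back to an NMI before continuing with reset.
 */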
  1389. static void gen7_gmu_cooperative_reset(struct kgsl_device *device)
  1390. {
  1391. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1392. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1393. unsigned int result;
  1394. gmu_core_regwrite(device, GEN7_GMU_CX_GMU_WDOG_CTRL, 0);
  1395. gmu_core_regwrite(device, GEN7_GMU_HOST2GMU_INTR_SET, BIT(17));
  1396. /*
  1397. * After triggering graceful death wait for snapshot ready
  1398. * indication from GMU.
  1399. */
  1400. if (!gmu_core_timed_poll_check(device, GEN7_GMU_CM3_FW_INIT_RESULT,
  1401. 0x800, 2, 0x800))
  1402. return;
  1403. gmu_core_regread(device, GEN7_GMU_CM3_FW_INIT_RESULT, &result);
  1404. dev_err(&gmu->pdev->dev,
  1405. "GMU cooperative reset timed out 0x%x\n", result);
/*
 * If we don't get a snapshot-ready indication from the GMU, trigger an
 * NMI; if that also times out, just continue with the reset.
 */
  1410. gen7_gmu_send_nmi(device, true);
  1411. gmu_core_regread(device, GEN7_GMU_CM3_FW_INIT_RESULT, &result);
  1412. if ((result & 0x800) != 0x800)
  1413. dev_err(&gmu->pdev->dev,
  1414. "GMU cooperative reset NMI timed out 0x%x\n", result);
  1415. }
  1416. static int gen7_gmu_wait_for_active_transition(struct kgsl_device *device)
  1417. {
  1418. unsigned int reg;
  1419. struct gen7_gmu_device *gmu = to_gen7_gmu(ADRENO_DEVICE(device));
  1420. if (gmu_core_timed_poll_check(device, GEN7_GPU_GMU_CX_GMU_RPMH_POWER_STATE,
  1421. GPU_HW_ACTIVE, 100, GENMASK(3, 0))) {
  1422. gmu_core_regread(device, GEN7_GPU_GMU_CX_GMU_RPMH_POWER_STATE, &reg);
  1423. dev_err(&gmu->pdev->dev,
  1424. "GMU failed to move to ACTIVE state, Current state: 0x%x\n",
  1425. reg);
  1426. return -ETIMEDOUT;
  1427. }
  1428. return 0;
  1429. }
  1430. static bool gen7_gmu_scales_bandwidth(struct kgsl_device *device)
  1431. {
  1432. return true;
  1433. }
  1434. void gen7_gmu_handle_watchdog(struct adreno_device *adreno_dev)
  1435. {
  1436. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1437. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1438. u32 mask;
  1439. /* Temporarily mask the watchdog interrupt to prevent a storm */
  1440. gmu_core_regread(device, GEN7_GMU_AO_HOST_INTERRUPT_MASK, &mask);
  1441. gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_MASK,
  1442. (mask | GMU_INT_WDOG_BITE));
  1443. gen7_gmu_send_nmi(device, false);
  1444. dev_err_ratelimited(&gmu->pdev->dev,
  1445. "GMU watchdog expired interrupt received\n");
  1446. }
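/* Handle GMU always-on (AO) interrupts: AHB bus errors, watchdog bites and fence errors */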
  1447. static irqreturn_t gen7_gmu_irq_handler(int irq, void *data)
  1448. {
  1449. struct kgsl_device *device = data;
  1450. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1451. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1452. const struct gen7_gpudev *gen7_gpudev =
  1453. to_gen7_gpudev(ADRENO_GPU_DEVICE(adreno_dev));
  1454. unsigned int status = 0;
  1455. gmu_core_regread(device, GEN7_GMU_AO_HOST_INTERRUPT_STATUS, &status);
  1456. gmu_core_regwrite(device, GEN7_GMU_AO_HOST_INTERRUPT_CLR, status);
  1457. if (status & GMU_INT_HOST_AHB_BUS_ERR)
  1458. dev_err_ratelimited(&gmu->pdev->dev,
  1459. "AHB bus error interrupt received\n");
  1460. if (status & GMU_INT_WDOG_BITE)
  1461. gen7_gpudev->handle_watchdog(adreno_dev);
  1462. if (status & GMU_INT_FENCE_ERR) {
  1463. unsigned int fence_status;
  1464. gmu_core_regread(device, GEN7_GMU_AHB_FENCE_STATUS,
  1465. &fence_status);
  1466. dev_err_ratelimited(&gmu->pdev->dev,
  1467. "FENCE error interrupt received %x\n", fence_status);
  1468. }
  1469. if (status & ~GMU_AO_INT_MASK)
  1470. dev_err_ratelimited(&gmu->pdev->dev,
  1471. "Unhandled GMU interrupts 0x%lx\n",
  1472. status & ~GMU_AO_INT_MASK);
  1473. return IRQ_HANDLED;
  1474. }
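/* Notify AOP of the current ACD state over the QMP mailbox, if one is available */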
  1475. void gen7_gmu_aop_send_acd_state(struct gen7_gmu_device *gmu, bool flag)
  1476. {
  1477. struct qmp_pkt msg;
  1478. char msg_buf[36];
  1479. u32 size;
  1480. int ret;
  1481. if (IS_ERR_OR_NULL(gmu->mailbox.channel))
  1482. return;
  1483. size = scnprintf(msg_buf, sizeof(msg_buf),
  1484. "{class: gpu, res: acd, val: %d}", flag);
  1485. /* mailbox controller expects 4-byte aligned buffer */
  1486. msg.size = ALIGN((size + 1), SZ_4);
  1487. msg.data = msg_buf;
  1488. ret = mbox_send_message(gmu->mailbox.channel, &msg);
  1489. if (ret < 0)
  1490. dev_err(&gmu->pdev->dev,
  1491. "AOP mbox send message failed: %d\n", ret);
  1492. }
  1493. int gen7_gmu_enable_clks(struct adreno_device *adreno_dev, u32 level)
  1494. {
  1495. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1496. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1497. int ret;
  1498. gen7_rdpm_cx_freq_update(gmu, gmu->freqs[level] / 1000);
  1499. ret = kgsl_clk_set_rate(gmu->clks, gmu->num_clks, "gmu_clk",
  1500. gmu->freqs[level]);
  1501. if (ret) {
  1502. dev_err(&gmu->pdev->dev, "GMU clock:%d set failed:%d\n",
  1503. gmu->freqs[level], ret);
  1504. return ret;
  1505. }
  1506. ret = kgsl_clk_set_rate(gmu->clks, gmu->num_clks, "hub_clk",
  1507. adreno_dev->gmu_hub_clk_freq);
  1508. if (ret && ret != -ENODEV) {
  1509. dev_err(&gmu->pdev->dev, "Unable to set the HUB clock\n");
  1510. return ret;
  1511. }
  1512. ret = clk_bulk_prepare_enable(gmu->num_clks, gmu->clks);
  1513. if (ret) {
  1514. dev_err(&gmu->pdev->dev, "Cannot enable GMU clocks\n");
  1515. return ret;
  1516. }
  1517. device->state = KGSL_STATE_AWARE;
  1518. return 0;
  1519. }
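/*
 * Cold-boot the GMU: power the CX rail and clocks, load and verify the GMU
 * firmware, load the PDC/RSC ucode once, and bring up HFI before leaving the
 * device in the AWARE state.
 */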
  1520. static int gen7_gmu_first_boot(struct adreno_device *adreno_dev)
  1521. {
  1522. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1523. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1524. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1525. int level, ret;
  1526. kgsl_pwrctrl_request_state(device, KGSL_STATE_AWARE);
  1527. gen7_gmu_aop_send_acd_state(gmu, adreno_dev->acd_enabled);
  1528. ret = kgsl_pwrctrl_enable_cx_gdsc(device);
  1529. if (ret)
  1530. return ret;
  1531. ret = gen7_gmu_enable_clks(adreno_dev, 0);
  1532. if (ret)
  1533. goto gdsc_off;
/*
 * Enable AHB timeout detection so that a register access that takes too
 * long is caught before the NOC timeout triggers. Enable this logic
 * before any register access, i.e. right after enabling the clocks.
 */
  1539. gen7_enable_ahb_timeout_detection(adreno_dev);
  1540. /* Initialize the CX timer */
  1541. gen7_cx_timer_init(adreno_dev);
  1542. ret = gen7_gmu_load_fw(adreno_dev);
  1543. if (ret)
  1544. goto clks_gdsc_off;
  1545. ret = gen7_gmu_version_info(adreno_dev);
  1546. if (ret)
  1547. goto clks_gdsc_off;
  1548. ret = gen7_gmu_itcm_shadow(adreno_dev);
  1549. if (ret)
  1550. goto clks_gdsc_off;
  1551. ret = gen7_scm_gpu_init_cx_regs(adreno_dev);
  1552. if (ret)
  1553. goto clks_gdsc_off;
  1554. gen7_gmu_register_config(adreno_dev);
  1555. gen7_gmu_irq_enable(adreno_dev);
  1556. /* Vote for minimal DDR BW for GMU to init */
  1557. level = pwr->pwrlevels[pwr->default_pwrlevel].bus_min;
  1558. icc_set_bw(pwr->icc_path, 0, kBps_to_icc(pwr->ddr_table[level]));
  1559. /* Clear any GPU faults that might have been left over */
  1560. adreno_clear_gpu_fault(adreno_dev);
  1561. ret = gen7_gmu_device_start(adreno_dev);
  1562. if (ret)
  1563. goto err;
  1564. if (!test_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags)) {
  1565. ret = gen7_load_pdc_ucode(adreno_dev);
  1566. if (ret)
  1567. goto err;
  1568. gen7_load_rsc_ucode(adreno_dev);
  1569. set_bit(GMU_PRIV_PDC_RSC_LOADED, &gmu->flags);
  1570. }
  1571. ret = gen7_gmu_hfi_start(adreno_dev);
  1572. if (ret)
  1573. goto err;
  1574. gen7_get_gpu_feature_info(adreno_dev);
  1575. ret = gen7_hfi_start(adreno_dev);
  1576. if (ret)
  1577. goto err;
  1578. if (gen7_hfi_send_get_value(adreno_dev, HFI_VALUE_GMU_AB_VOTE, 0) == 1 &&
  1579. !WARN_ONCE(!adreno_dev->gpucore->num_ddr_channels,
  1580. "Number of DDR channel is not specified in gpu core")) {
  1581. adreno_dev->gmu_ab = true;
  1582. set_bit(ADRENO_DEVICE_GMU_AB, &adreno_dev->priv);
  1583. }
  1584. icc_set_bw(pwr->icc_path, 0, 0);
  1585. device->gmu_fault = false;
  1586. kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);
  1587. return 0;
  1588. err:
  1589. gen7_gmu_irq_disable(adreno_dev);
  1590. if (device->gmu_fault) {
  1591. gen7_gmu_suspend(adreno_dev);
  1592. return ret;
  1593. }
  1594. clks_gdsc_off:
  1595. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1596. gdsc_off:
  1597. kgsl_pwrctrl_disable_cx_gdsc(device);
  1598. gen7_rdpm_cx_freq_update(gmu, 0);
  1599. return ret;
  1600. }
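/*
 * Warm-boot the GMU after the first boot: re-enable power and clocks, run the
 * RSCC wakeup sequence, reload the firmware and restart HFI.
 */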
  1601. static int gen7_gmu_boot(struct adreno_device *adreno_dev)
  1602. {
  1603. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1604. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1605. int ret = 0;
  1606. kgsl_pwrctrl_request_state(device, KGSL_STATE_AWARE);
  1607. ret = kgsl_pwrctrl_enable_cx_gdsc(device);
  1608. if (ret)
  1609. return ret;
  1610. ret = gen7_gmu_enable_clks(adreno_dev, 0);
  1611. if (ret)
  1612. goto gdsc_off;
/*
 * Enable AHB timeout detection so that a register access that takes too
 * long is caught before the NOC timeout triggers. Enable this logic
 * before any register access, i.e. right after enabling the clocks.
 */
  1618. gen7_enable_ahb_timeout_detection(adreno_dev);
  1619. /* Initialize the CX timer */
  1620. gen7_cx_timer_init(adreno_dev);
  1621. ret = gen7_rscc_wakeup_sequence(adreno_dev);
  1622. if (ret)
  1623. goto clks_gdsc_off;
  1624. ret = gen7_gmu_load_fw(adreno_dev);
  1625. if (ret)
  1626. goto clks_gdsc_off;
  1627. gen7_gmu_register_config(adreno_dev);
  1628. gen7_gmu_irq_enable(adreno_dev);
  1629. /* Clear any GPU faults that might have been left over */
  1630. adreno_clear_gpu_fault(adreno_dev);
  1631. ret = gen7_gmu_device_start(adreno_dev);
  1632. if (ret)
  1633. goto err;
  1634. ret = gen7_gmu_hfi_start(adreno_dev);
  1635. if (ret)
  1636. goto err;
  1637. ret = gen7_hfi_start(adreno_dev);
  1638. if (ret)
  1639. goto err;
  1640. device->gmu_fault = false;
  1641. kgsl_pwrctrl_set_state(device, KGSL_STATE_AWARE);
  1642. return 0;
  1643. err:
  1644. gen7_gmu_irq_disable(adreno_dev);
  1645. if (device->gmu_fault) {
  1646. gen7_gmu_suspend(adreno_dev);
  1647. return ret;
  1648. }
  1649. clks_gdsc_off:
  1650. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  1651. gdsc_off:
  1652. kgsl_pwrctrl_disable_cx_gdsc(device);
  1653. gen7_rdpm_cx_freq_update(gmu, 0);
  1654. return ret;
  1655. }
  1656. static void set_acd(struct adreno_device *adreno_dev, void *priv)
  1657. {
  1658. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1659. adreno_dev->acd_enabled = *((bool *)priv);
  1660. gen7_gmu_aop_send_acd_state(gmu, adreno_dev->acd_enabled);
  1661. }
  1662. static int gen7_gmu_acd_set(struct kgsl_device *device, bool val)
  1663. {
  1664. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1665. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1666. if (IS_ERR_OR_NULL(gmu->mailbox.channel))
  1667. return -EINVAL;
  1668. /* Don't do any unneeded work if ACD is already in the correct state */
  1669. if (adreno_dev->acd_enabled == val)
  1670. return 0;
  1671. /* Power cycle the GPU for changes to take effect */
  1672. return adreno_power_cycle(adreno_dev, set_acd, &val);
  1673. }
  1674. #define BCL_RESP_TYPE_MASK BIT(0)
  1675. #define BCL_SID0_MASK GENMASK(7, 1)
  1676. #define BCL_SID1_MASK GENMASK(14, 8)
  1677. #define BCL_SID2_MASK GENMASK(21, 15)
  1678. static int gen7_bcl_sid_set(struct kgsl_device *device, u32 sid_id, u64 sid_val)
  1679. {
  1680. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1681. u32 bcl_data, val = (u32) sid_val;
  1682. if (!ADRENO_FEATURE(adreno_dev, ADRENO_BCL) ||
  1683. !FIELD_GET(BCL_RESP_TYPE_MASK, adreno_dev->bcl_data))
  1684. return -EINVAL;
  1685. switch (sid_id) {
  1686. case 0:
  1687. adreno_dev->bcl_data &= ~BCL_SID0_MASK;
  1688. bcl_data = adreno_dev->bcl_data | FIELD_PREP(BCL_SID0_MASK, val);
  1689. break;
  1690. case 1:
  1691. adreno_dev->bcl_data &= ~BCL_SID1_MASK;
  1692. bcl_data = adreno_dev->bcl_data | FIELD_PREP(BCL_SID1_MASK, val);
  1693. break;
  1694. case 2:
  1695. adreno_dev->bcl_data &= ~BCL_SID2_MASK;
  1696. bcl_data = adreno_dev->bcl_data | FIELD_PREP(BCL_SID2_MASK, val);
  1697. break;
  1698. default:
  1699. return -EINVAL;
  1700. }
  1701. return adreno_power_cycle_u32(adreno_dev, &adreno_dev->bcl_data, bcl_data);
  1702. }
  1703. static u64 gen7_bcl_sid_get(struct kgsl_device *device, u32 sid_id)
  1704. {
  1705. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1706. if (!ADRENO_FEATURE(adreno_dev, ADRENO_BCL) ||
  1707. !FIELD_GET(BCL_RESP_TYPE_MASK, adreno_dev->bcl_data))
  1708. return 0;
  1709. switch (sid_id) {
  1710. case 0:
  1711. return ((u64) FIELD_GET(BCL_SID0_MASK, adreno_dev->bcl_data));
  1712. case 1:
  1713. return ((u64) FIELD_GET(BCL_SID1_MASK, adreno_dev->bcl_data));
  1714. case 2:
  1715. return ((u64) FIELD_GET(BCL_SID2_MASK, adreno_dev->bcl_data));
  1716. default:
  1717. return 0;
  1718. }
  1719. }
  1720. static void gen7_send_tlb_hint(struct kgsl_device *device, bool val)
  1721. {
  1722. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1723. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1724. if (!gmu->domain)
  1725. return;
  1726. #if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
  1727. qcom_skip_tlb_management(&gmu->pdev->dev, val);
  1728. #endif
  1729. if (!val)
  1730. iommu_flush_iotlb_all(gmu->domain);
  1731. }
  1732. static const struct gmu_dev_ops gen7_gmudev = {
  1733. .oob_set = gen7_gmu_oob_set,
  1734. .oob_clear = gen7_gmu_oob_clear,
  1735. .ifpc_store = gen7_gmu_ifpc_store,
  1736. .ifpc_isenabled = gen7_gmu_ifpc_isenabled,
  1737. .cooperative_reset = gen7_gmu_cooperative_reset,
  1738. .wait_for_active_transition = gen7_gmu_wait_for_active_transition,
  1739. .scales_bandwidth = gen7_gmu_scales_bandwidth,
  1740. .acd_set = gen7_gmu_acd_set,
  1741. .bcl_sid_set = gen7_bcl_sid_set,
  1742. .bcl_sid_get = gen7_bcl_sid_get,
  1743. .send_nmi = gen7_gmu_send_nmi,
  1744. .send_tlb_hint = gen7_send_tlb_hint,
  1745. };
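/*
 * Apply a bus level and AB bandwidth vote, dropping fields that match the
 * current state, and forward the rest to the GMU via the DCVS path.
 */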
  1746. static int gen7_gmu_bus_set(struct adreno_device *adreno_dev, int buslevel,
  1747. u32 ab)
  1748. {
  1749. const struct adreno_gen7_core *gen7_core = to_gen7_core(adreno_dev);
  1750. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1751. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1752. int ret = 0;
/* Skip the icc path for targets that support the ACV vote from the GMU */
  1754. if (!gen7_core->acv_perfmode_vote)
  1755. kgsl_icc_set_tag(pwr, buslevel);
  1756. if (buslevel == pwr->cur_buslevel)
  1757. buslevel = INVALID_DCVS_IDX;
  1758. if ((ab == pwr->cur_ab) || (ab == 0))
  1759. ab = INVALID_AB_VALUE;
  1760. if ((ab == INVALID_AB_VALUE) && (buslevel == INVALID_DCVS_IDX))
  1761. return 0;
  1762. ret = gen7_gmu_dcvs_set(adreno_dev, INVALID_DCVS_IDX,
  1763. buslevel, ab);
  1764. if (ret)
  1765. return ret;
  1766. if (buslevel != INVALID_DCVS_IDX)
  1767. pwr->cur_buslevel = buslevel;
  1768. if (ab != INVALID_AB_VALUE) {
  1769. if (!adreno_dev->gmu_ab)
  1770. icc_set_bw(pwr->icc_path, MBps_to_icc(ab), 0);
  1771. pwr->cur_ab = ab;
  1772. }
  1773. trace_kgsl_buslevel(device, pwr->active_pwrlevel, pwr->cur_buslevel, pwr->cur_ab);
  1774. return ret;
  1775. }
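/*
 * Quantize an AB bandwidth request (in MBps) into the 16-bit vote expected by
 * the GMU. As an illustrative example (values are hypothetical): with a max
 * DDR bandwidth of 8,000,000 kbps, an AB request of 2000 MBps quantizes to
 * (2000 * 1000 * 65536) / 8000000 = 16384.
 */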
  1776. u32 gen7_bus_ab_quantize(struct adreno_device *adreno_dev, u32 ab)
  1777. {
  1778. u16 vote = 0;
  1779. u32 max_bw, max_ab;
  1780. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1781. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1782. if (!adreno_dev->gmu_ab || (ab == INVALID_AB_VALUE))
  1783. return (FIELD_PREP(GENMASK(31, 16), INVALID_AB_VALUE));
  1784. /*
  1785. * max ddr bandwidth (kbps) = (Max bw in kbps per channel * number of channel)
  1786. * max ab (Mbps) = max ddr bandwidth (kbps) / 1000
  1787. */
  1788. max_bw = pwr->ddr_table[pwr->ddr_table_count - 1] * adreno_dev->gpucore->num_ddr_channels;
  1789. max_ab = max_bw / 1000;
/*
 * If the requested AB is higher than the theoretical max bandwidth, set the
 * AB vote to the maximum allowable quantized AB value.
 *
 * The power FW supports a 16-bit AB BW level. We can quantize the entire
 * vote-able BW range into a 16-bit space, and the quantized value can then
 * be used to vote for AB through the GMU. Quantization is performed as:
 *
 * quantized_vote = (ab vote (kbps) * 2^16) / max ddr bandwidth (kbps)
 */
  1800. if (ab >= max_ab)
  1801. vote = MAX_AB_VALUE;
  1802. else
  1803. vote = (u16)(((u64)ab * 1000 * (1 << 16)) / max_bw);
  1804. /*
  1805. * Vote will be calculated as 0 for smaller AB values.
  1806. * Set a minimum non-zero vote in such cases.
  1807. */
  1808. if (ab && !vote)
  1809. vote = 0x1;
/*
 * Set the AB enable mask and the valid AB vote. req.bw is a 32-bit value laid
 * out as 0xABABENIB; this return value sets the upper 16 bits (the AB vote),
 * and the EN field indicates whether the AB vote is valid.
 */
  1815. return (FIELD_PREP(GENMASK(31, 16), vote) | FIELD_PREP(GENMASK(15, 8), 1));
  1816. }
  1817. static void gen7_free_gmu_globals(struct gen7_gmu_device *gmu)
  1818. {
  1819. int i;
  1820. for (i = 0; i < gmu->global_entries && i < ARRAY_SIZE(gmu->gmu_globals); i++) {
  1821. struct kgsl_memdesc *md = &gmu->gmu_globals[i];
  1822. if (!md->gmuaddr)
  1823. continue;
  1824. iommu_unmap(gmu->domain, md->gmuaddr, md->size);
  1825. if (md->priv & KGSL_MEMDESC_SYSMEM)
  1826. kgsl_sharedmem_free(md);
  1827. memset(md, 0, sizeof(*md));
  1828. }
  1829. if (gmu->domain) {
  1830. iommu_detach_device(gmu->domain, &gmu->pdev->dev);
  1831. iommu_domain_free(gmu->domain);
  1832. gmu->domain = NULL;
  1833. }
  1834. gmu->global_entries = 0;
  1835. }
  1836. static int gen7_gmu_aop_mailbox_init(struct adreno_device *adreno_dev,
  1837. struct gen7_gmu_device *gmu)
  1838. {
  1839. struct kgsl_mailbox *mailbox = &gmu->mailbox;
  1840. mailbox->client.dev = &gmu->pdev->dev;
  1841. mailbox->client.tx_block = true;
  1842. mailbox->client.tx_tout = 1000;
  1843. mailbox->client.knows_txdone = false;
  1844. mailbox->channel = mbox_request_channel(&mailbox->client, 0);
  1845. if (IS_ERR(mailbox->channel))
  1846. return PTR_ERR(mailbox->channel);
  1847. adreno_dev->acd_enabled = true;
  1848. return 0;
  1849. }
  1850. static void gen7_gmu_acd_probe(struct kgsl_device *device,
  1851. struct gen7_gmu_device *gmu, struct device_node *node)
  1852. {
  1853. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1854. struct kgsl_pwrctrl *pwr = &device->pwrctrl;
  1855. struct kgsl_pwrlevel *pwrlevel =
  1856. &pwr->pwrlevels[pwr->num_pwrlevels - 1];
  1857. struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_table;
  1858. int ret, i, cmd_idx = 0;
  1859. if (!ADRENO_FEATURE(adreno_dev, ADRENO_ACD))
  1860. return;
  1861. cmd->hdr = CREATE_MSG_HDR(H2F_MSG_ACD_TBL, HFI_MSG_CMD);
  1862. cmd->version = 1;
  1863. cmd->stride = 1;
  1864. cmd->enable_by_level = 0;
/*
 * Iterate through each GPU power level, build a mask of the ACD-enabled
 * levels for the GMU firmware, and store the corresponding control
 * register configuration in the acd_table structure.
 */
  1870. for (i = 0; i < pwr->num_pwrlevels; i++) {
  1871. if (pwrlevel->acd_level) {
  1872. cmd->enable_by_level |= (1 << (i + 1));
  1873. cmd->data[cmd_idx++] = pwrlevel->acd_level;
  1874. }
  1875. pwrlevel--;
  1876. }
  1877. if (!cmd->enable_by_level)
  1878. return;
  1879. cmd->num_levels = cmd_idx;
  1880. ret = gen7_gmu_aop_mailbox_init(adreno_dev, gmu);
  1881. if (ret)
  1882. dev_err(&gmu->pdev->dev,
  1883. "AOP mailbox init failed: %d\n", ret);
  1884. }
  1885. static int gen7_gmu_reg_probe(struct adreno_device *adreno_dev)
  1886. {
  1887. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  1888. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1889. int ret;
  1890. ret = kgsl_regmap_add_region(&device->regmap, gmu->pdev, "gmu", NULL, NULL);
  1891. if (ret)
  1892. dev_err(&gmu->pdev->dev, "Unable to map the GMU registers\n");
  1893. /*
  1894. * gmu_ao_blk_dec1 and gmu_ao_blk_dec2 are contiguous and contained within the gmu region
  1895. * mapped above. gmu_ao_blk_dec0 is not within the gmu region and is mapped separately.
  1896. */
  1897. kgsl_regmap_add_region(&device->regmap, gmu->pdev, "gmu_ao_blk_dec0", NULL, NULL);
  1898. return ret;
  1899. }
  1900. static int gen7_gmu_clk_probe(struct adreno_device *adreno_dev)
  1901. {
  1902. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1903. int ret, i;
  1904. int tbl_size;
  1905. int num_freqs;
  1906. int offset;
  1907. ret = devm_clk_bulk_get_all(&gmu->pdev->dev, &gmu->clks);
  1908. if (ret < 0)
  1909. return ret;
  1910. /*
  1911. * Voting for apb_pclk will enable power and clocks required for
  1912. * QDSS path to function. However, if QCOM_KGSL_QDSS_STM is not enabled,
  1913. * QDSS is essentially unusable. Hence, if QDSS cannot be used,
  1914. * don't vote for this clock.
  1915. */
  1916. if (!IS_ENABLED(CONFIG_QCOM_KGSL_QDSS_STM)) {
  1917. for (i = 0; i < ret; i++) {
  1918. if (!strcmp(gmu->clks[i].id, "apb_pclk")) {
  1919. gmu->clks[i].clk = NULL;
  1920. break;
  1921. }
  1922. }
  1923. }
  1924. gmu->num_clks = ret;
  1925. /* Read the optional list of GMU frequencies */
  1926. if (of_get_property(gmu->pdev->dev.of_node,
  1927. "qcom,gmu-freq-table", &tbl_size) == NULL)
  1928. goto default_gmu_freq;
  1929. num_freqs = (tbl_size / sizeof(u32)) / 2;
  1930. if (num_freqs != ARRAY_SIZE(gmu->freqs))
  1931. goto default_gmu_freq;
  1932. for (i = 0; i < num_freqs; i++) {
  1933. offset = i * 2;
  1934. ret = of_property_read_u32_index(gmu->pdev->dev.of_node,
  1935. "qcom,gmu-freq-table", offset, &gmu->freqs[i]);
  1936. if (ret)
  1937. goto default_gmu_freq;
  1938. ret = of_property_read_u32_index(gmu->pdev->dev.of_node,
  1939. "qcom,gmu-freq-table", offset + 1, &gmu->vlvls[i]);
  1940. if (ret)
  1941. goto default_gmu_freq;
  1942. }
  1943. return 0;
  1944. default_gmu_freq:
  1945. /* The GMU frequency table is missing or invalid. Go with a default */
  1946. gmu->freqs[0] = GMU_FREQ_MIN;
  1947. gmu->vlvls[0] = RPMH_REGULATOR_LEVEL_LOW_SVS;
  1948. gmu->freqs[1] = GMU_FREQ_MAX;
  1949. gmu->vlvls[1] = RPMH_REGULATOR_LEVEL_SVS;
  1950. return 0;
  1951. }
  1952. static void gen7_gmu_rdpm_probe(struct gen7_gmu_device *gmu,
  1953. struct kgsl_device *device)
  1954. {
  1955. struct resource *res;
  1956. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM, "rdpm_cx");
  1957. if (res)
  1958. gmu->rdpm_cx_virt = devm_ioremap(&device->pdev->dev,
  1959. res->start, resource_size(res));
  1960. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM, "rdpm_mx");
  1961. if (res)
  1962. gmu->rdpm_mx_virt = devm_ioremap(&device->pdev->dev,
  1963. res->start, resource_size(res));
  1964. }
  1965. void gen7_gmu_remove(struct kgsl_device *device)
  1966. {
  1967. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  1968. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  1969. if (!IS_ERR_OR_NULL(gmu->mailbox.channel))
  1970. mbox_free_channel(gmu->mailbox.channel);
  1971. adreno_dev->acd_enabled = false;
  1972. if (gmu->fw_image)
  1973. release_firmware(gmu->fw_image);
  1974. gen7_free_gmu_globals(gmu);
  1975. vfree(gmu->itcm_shadow);
  1976. if (gmu->log_kobj.state_initialized)
  1977. kobject_put(&gmu->log_kobj);
  1978. if (gmu->stats_kobj.state_initialized)
  1979. kobject_put(&gmu->stats_kobj);
  1980. }
  1981. static int gen7_gmu_iommu_fault_handler(struct iommu_domain *domain,
  1982. struct device *dev, unsigned long addr, int flags, void *token)
  1983. {
  1984. char *fault_type = "unknown";
  1985. if (flags & IOMMU_FAULT_TRANSLATION)
  1986. fault_type = "translation";
  1987. else if (flags & IOMMU_FAULT_PERMISSION)
  1988. fault_type = "permission";
  1989. else if (flags & IOMMU_FAULT_EXTERNAL)
  1990. fault_type = "external";
  1991. else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
  1992. fault_type = "transaction stalled";
  1993. dev_err(dev, "GMU fault addr = %lX, context=kernel (%s %s fault)\n",
  1994. addr,
  1995. (flags & IOMMU_FAULT_WRITE) ? "write" : "read",
  1996. fault_type);
  1997. return 0;
  1998. }
  1999. static int gen7_gmu_iommu_init(struct gen7_gmu_device *gmu)
  2000. {
  2001. int ret;
  2002. gmu->domain = iommu_domain_alloc(&platform_bus_type);
  2003. if (gmu->domain == NULL) {
  2004. dev_err(&gmu->pdev->dev, "Unable to allocate GMU IOMMU domain\n");
  2005. return -ENODEV;
  2006. }
/*
 * Disable stall-on-fault for the GMU context bank.
 * This sets SCTLR.CFCFG = 0.
 * Also note that the SMMU driver sets SCTLR.HUPCF = 0 by default.
 */
  2012. qcom_iommu_set_fault_model(gmu->domain, QCOM_IOMMU_FAULT_MODEL_NO_STALL);
  2013. ret = iommu_attach_device(gmu->domain, &gmu->pdev->dev);
  2014. if (!ret) {
  2015. iommu_set_fault_handler(gmu->domain,
  2016. gen7_gmu_iommu_fault_handler, gmu);
  2017. return 0;
  2018. }
  2019. dev_err(&gmu->pdev->dev,
  2020. "Unable to attach GMU IOMMU domain: %d\n", ret);
  2021. iommu_domain_free(gmu->domain);
  2022. gmu->domain = NULL;
  2023. return ret;
  2024. }
  2025. /* Default IFPC timer (300usec) value */
  2026. #define GEN7_GMU_LONG_IFPC_HYST FIELD_PREP(GENMASK(15, 0), 0x1680)
  2027. /* Minimum IFPC timer (200usec) allowed to override default value */
  2028. #define GEN7_GMU_LONG_IFPC_HYST_FLOOR FIELD_PREP(GENMASK(15, 0), 0x0F00)
  2029. int gen7_gmu_probe(struct kgsl_device *device,
  2030. struct platform_device *pdev)
  2031. {
  2032. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2033. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2034. struct device *dev = &pdev->dev;
  2035. struct resource *res;
  2036. int ret, i;
  2037. gmu->pdev = pdev;
  2038. dma_set_coherent_mask(&gmu->pdev->dev, DMA_BIT_MASK(64));
  2039. gmu->pdev->dev.dma_mask = &gmu->pdev->dev.coherent_dma_mask;
  2040. set_dma_ops(&gmu->pdev->dev, NULL);
  2041. res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
  2042. "rscc");
  2043. if (res) {
  2044. gmu->rscc_virt = devm_ioremap(&device->pdev->dev, res->start,
  2045. resource_size(res));
  2046. if (!gmu->rscc_virt) {
  2047. dev_err(&gmu->pdev->dev, "rscc ioremap failed\n");
  2048. return -ENOMEM;
  2049. }
  2050. }
/* Set up any rdpm register ranges */
  2052. gen7_gmu_rdpm_probe(gmu, device);
  2053. /* Set up GMU regulators */
  2054. ret = kgsl_pwrctrl_probe_regulators(device, pdev);
  2055. if (ret)
  2056. return ret;
  2057. ret = gen7_gmu_clk_probe(adreno_dev);
  2058. if (ret)
  2059. return ret;
  2060. /* Set up GMU IOMMU and shared memory with GMU */
  2061. ret = gen7_gmu_iommu_init(gmu);
  2062. if (ret)
  2063. goto error;
  2064. gmu->vma = gen7_gmu_vma;
  2065. for (i = 0; i < ARRAY_SIZE(gen7_gmu_vma); i++) {
  2066. struct gmu_vma_entry *vma = &gen7_gmu_vma[i];
  2067. vma->vma_root = RB_ROOT;
  2068. spin_lock_init(&vma->lock);
  2069. }
/* Map and reserve the GMU CSR registers */
  2071. ret = gen7_gmu_reg_probe(adreno_dev);
  2072. if (ret)
  2073. goto error;
  2074. /* Populates RPMh configurations */
  2075. ret = gen7_build_rpmh_tables(adreno_dev);
  2076. if (ret)
  2077. goto error;
  2078. /* Set up GMU idle state */
  2079. if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) {
  2080. gmu->idle_level = GPU_HW_IFPC;
  2081. adreno_dev->ifpc_hyst = GEN7_GMU_LONG_IFPC_HYST;
  2082. adreno_dev->ifpc_hyst_floor = GEN7_GMU_LONG_IFPC_HYST_FLOOR;
  2083. } else {
  2084. gmu->idle_level = GPU_HW_ACTIVE;
  2085. }
  2086. gen7_gmu_acd_probe(device, gmu, pdev->dev.of_node);
  2087. set_bit(GMU_ENABLED, &device->gmu_core.flags);
  2088. device->gmu_core.dev_ops = &gen7_gmudev;
  2089. /* Set default GMU attributes */
  2090. gmu->log_stream_enable = false;
  2091. gmu->log_group_mask = 0x3;
  2092. /* Initialize to zero to detect trace packet loss */
  2093. gmu->trace.seq_num = 0;
  2094. /* Disabled by default */
  2095. gmu->stats_enable = false;
/* Default the stats mask to the CM3 busy cycles countable */
  2097. gmu->stats_mask = BIT(GEN7_GMU_CM3_BUSY_CYCLES);
  2098. /* Interval is in 50 us units. Set default sampling frequency to 4x50 us */
  2099. gmu->stats_interval = HFI_FEATURE_GMU_STATS_INTERVAL;
  2100. /* GMU sysfs nodes setup */
  2101. (void) kobject_init_and_add(&gmu->log_kobj, &log_kobj_type, &dev->kobj, "log");
  2102. (void) kobject_init_and_add(&gmu->stats_kobj, &stats_kobj_type, &dev->kobj, "stats");
  2103. of_property_read_u32(gmu->pdev->dev.of_node, "qcom,gmu-perf-ddr-bw",
  2104. &gmu->perf_ddr_bw);
  2105. spin_lock_init(&gmu->hfi.cmdq_lock);
  2106. gmu->irq = kgsl_request_irq(gmu->pdev, "gmu",
  2107. gen7_gmu_irq_handler, device);
  2108. if (gmu->irq >= 0)
  2109. return 0;
  2110. ret = gmu->irq;
  2111. error:
  2112. gen7_gmu_remove(device);
  2113. return ret;
  2114. }
  2115. static void gen7_gmu_active_count_put(struct adreno_device *adreno_dev)
  2116. {
  2117. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2118. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  2119. return;
  2120. if (WARN(atomic_read(&device->active_cnt) == 0,
  2121. "Unbalanced get/put calls to KGSL active count\n"))
  2122. return;
  2123. if (atomic_dec_and_test(&device->active_cnt)) {
  2124. kgsl_pwrscale_update_stats(device);
  2125. kgsl_pwrscale_update(device);
  2126. kgsl_start_idle_timer(device);
  2127. }
  2128. trace_kgsl_active_count(device,
  2129. (unsigned long) __builtin_return_address(0));
  2130. wake_up(&device->active_cnt_wq);
  2131. }
  2132. int gen7_halt_gbif(struct adreno_device *adreno_dev)
  2133. {
  2134. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2135. int ret;
  2136. /* Halt new client requests */
  2137. kgsl_regwrite(device, GEN7_GBIF_HALT, GEN7_GBIF_CLIENT_HALT_MASK);
  2138. ret = adreno_wait_for_halt_ack(device,
  2139. GEN7_GBIF_HALT_ACK, GEN7_GBIF_CLIENT_HALT_MASK);
  2140. /* Halt all AXI requests */
  2141. kgsl_regwrite(device, GEN7_GBIF_HALT, GEN7_GBIF_ARB_HALT_MASK);
  2142. ret = adreno_wait_for_halt_ack(device,
  2143. GEN7_GBIF_HALT_ACK, GEN7_GBIF_ARB_HALT_MASK);
  2144. /* De-assert the halts */
  2145. kgsl_regwrite(device, GEN7_GBIF_HALT, 0x0);
  2146. return ret;
  2147. }
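/*
 * Gracefully power off the GMU: wait for the lowest idle level, notify the
 * GMU to prepare for slumber, run the RSCC sleep sequence and halt the GBIF.
 * On any failure fall back to a forced suspend.
 */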
  2148. static int gen7_gmu_power_off(struct adreno_device *adreno_dev)
  2149. {
  2150. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2151. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2152. int ret = 0;
  2153. if (device->gmu_fault)
  2154. goto error;
  2155. /* Wait for the lowest idle level we requested */
  2156. ret = gen7_gmu_wait_for_lowest_idle(adreno_dev);
  2157. if (ret)
  2158. goto error;
  2159. ret = gen7_complete_rpmh_votes(gmu, 2);
  2160. if (ret)
  2161. goto error;
  2162. ret = gen7_gmu_notify_slumber(adreno_dev);
  2163. if (ret)
  2164. goto error;
  2165. ret = gen7_gmu_wait_for_idle(adreno_dev);
  2166. if (ret)
  2167. goto error;
  2168. ret = gen7_rscc_sleep_sequence(adreno_dev);
  2169. if (ret)
  2170. goto error;
  2171. gen7_rdpm_mx_freq_update(gmu, 0);
/* Now that we are done with the GMU and GPU, clear the GBIF */
  2173. ret = gen7_halt_gbif(adreno_dev);
  2174. if (ret)
  2175. goto error;
  2176. gen7_gmu_irq_disable(adreno_dev);
  2177. gen7_hfi_stop(adreno_dev);
  2178. clk_bulk_disable_unprepare(gmu->num_clks, gmu->clks);
  2179. kgsl_pwrctrl_disable_cx_gdsc(device);
  2180. gen7_rdpm_cx_freq_update(gmu, 0);
  2181. kgsl_pwrctrl_set_state(device, KGSL_STATE_NONE);
  2182. return 0;
  2183. error:
  2184. gen7_gmu_irq_disable(adreno_dev);
  2185. gen7_hfi_stop(adreno_dev);
  2186. gen7_gmu_suspend(adreno_dev);
  2187. return ret;
  2188. }
  2189. void gen7_enable_gpu_irq(struct adreno_device *adreno_dev)
  2190. {
  2191. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2192. kgsl_pwrctrl_irq(device, true);
  2193. adreno_irqctrl(adreno_dev, 1);
  2194. }
  2195. void gen7_disable_gpu_irq(struct adreno_device *adreno_dev)
  2196. {
  2197. kgsl_pwrctrl_irq(KGSL_DEVICE(adreno_dev), false);
  2198. if (gen7_gmu_gx_is_on(adreno_dev))
  2199. adreno_irqctrl(adreno_dev, 0);
  2200. }
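/*
 * Bring the GPU (GX) side up under an OOB vote: start the MMU, program the
 * hardware, start the ringbuffers and enable GPU interrupts.
 */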
  2201. static int gen7_gpu_boot(struct adreno_device *adreno_dev)
  2202. {
  2203. const struct adreno_gen7_core *gen7_core = to_gen7_core(adreno_dev);
  2204. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2205. int ret;
  2206. adreno_set_active_ctxs_null(adreno_dev);
  2207. ret = kgsl_mmu_start(device);
  2208. if (ret)
  2209. goto err;
  2210. ret = gen7_gmu_oob_set(device, oob_gpu);
  2211. if (ret)
  2212. goto oob_clear;
  2213. ret = gen7_gmu_hfi_start_msg(adreno_dev);
  2214. if (ret)
  2215. goto oob_clear;
  2216. /* Clear the busy_data stats - we're starting over from scratch */
  2217. memset(&adreno_dev->busy_data, 0, sizeof(adreno_dev->busy_data));
  2218. gen7_start(adreno_dev);
  2219. if (gen7_core->qos_value && adreno_is_preemption_enabled(adreno_dev))
  2220. kgsl_regwrite(device, GEN7_RBBM_GBIF_CLIENT_QOS_CNTL,
  2221. gen7_core->qos_value[adreno_dev->cur_rb->id]);
  2222. /* Re-initialize the coresight registers if applicable */
  2223. adreno_coresight_start(adreno_dev);
  2224. adreno_perfcounter_start(adreno_dev);
  2225. /* Clear FSR here in case it is set from a previous pagefault */
  2226. kgsl_mmu_clear_fsr(&device->mmu);
  2227. gen7_enable_gpu_irq(adreno_dev);
  2228. ret = gen7_rb_start(adreno_dev);
  2229. if (ret) {
  2230. gen7_disable_gpu_irq(adreno_dev);
  2231. goto oob_clear;
  2232. }
  2233. /*
  2234. * At this point it is safe to assume that we recovered. Setting
  2235. * this field allows us to take a new snapshot for the next failure
  2236. * if we are prioritizing the first unrecoverable snapshot.
  2237. */
  2238. if (device->snapshot)
  2239. device->snapshot->recovered = true;
  2240. /* Start the dispatcher */
  2241. adreno_dispatcher_start(device);
  2242. device->reset_counter++;
  2243. gen7_gmu_oob_clear(device, oob_gpu);
  2244. return 0;
  2245. oob_clear:
  2246. gen7_gmu_oob_clear(device, oob_gpu);
  2247. err:
  2248. gen7_gmu_power_off(adreno_dev);
  2249. return ret;
  2250. }
  2251. static void gmu_idle_timer(struct timer_list *t)
  2252. {
  2253. struct kgsl_device *device = container_of(t, struct kgsl_device,
  2254. idle_timer);
  2255. kgsl_schedule_work(&device->idle_check_ws);
  2256. }
  2257. static int gen7_boot(struct adreno_device *adreno_dev)
  2258. {
  2259. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2260. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2261. int ret;
  2262. if (WARN_ON(test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags)))
  2263. return 0;
  2264. kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
  2265. ret = gen7_gmu_boot(adreno_dev);
  2266. if (ret)
  2267. return ret;
  2268. ret = gen7_gpu_boot(adreno_dev);
  2269. if (ret)
  2270. return ret;
  2271. kgsl_start_idle_timer(device);
  2272. kgsl_pwrscale_wake(device);
  2273. set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);
  2274. device->pwrctrl.last_stat_updated = ktime_get();
  2275. kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
  2276. return ret;
  2277. }
  2278. static int gen7_first_boot(struct adreno_device *adreno_dev)
  2279. {
  2280. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2281. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2282. int ret;
  2283. if (test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags)) {
  2284. if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2285. return gen7_boot(adreno_dev);
  2286. return 0;
  2287. }
  2288. ret = gen7_ringbuffer_init(adreno_dev);
  2289. if (ret)
  2290. return ret;
  2291. ret = gen7_microcode_read(adreno_dev);
  2292. if (ret)
  2293. return ret;
  2294. ret = gen7_init(adreno_dev);
  2295. if (ret)
  2296. return ret;
  2297. ret = gen7_gmu_init(adreno_dev);
  2298. if (ret)
  2299. return ret;
  2300. kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
  2301. ret = gen7_gmu_first_boot(adreno_dev);
  2302. if (ret)
  2303. return ret;
  2304. ret = gen7_gpu_boot(adreno_dev);
  2305. if (ret)
  2306. return ret;
  2307. adreno_get_bus_counters(adreno_dev);
  2308. adreno_dev->cooperative_reset = ADRENO_FEATURE(adreno_dev,
  2309. ADRENO_COOP_RESET);
  2310. adreno_create_profile_buffer(adreno_dev);
  2311. set_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags);
  2312. set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);
/*
 * BCL needs its Central Broadcast register to be programmed from TZ.
 * For kernel versions prior to 6.1, this programming happens only when
 * the zap shader firmware load is successful. The zap firmware load can
 * fail in the boot-up path, so enable BCL only after we successfully
 * complete the first boot, ensuring the Central Broadcast register was
 * programmed before BCL is enabled.
 */
  2321. if (ADRENO_FEATURE(adreno_dev, ADRENO_BCL))
  2322. adreno_dev->bcl_enabled = true;
/*
 * There is a possible deadlock between kgsl firmware reading
 * (request_firmware) and devfreq update calls. During first boot, the kgsl
 * device mutex is held while request_firmware is called to read firmware;
 * request_firmware internally takes the dev_pm_qos_mtx lock. Devfreq update
 * calls triggered by thermal/bcl or devfreq sysfs, on the other hand, first
 * take the same dev_pm_qos_mtx lock and then try to take the kgsl device
 * mutex as part of the get_dev_status/target calls. This deadlocks when each
 * thread is unable to acquire the mutex held by the other. Enable devfreq
 * updates only now, when we are done reading all firmware files.
 */
  2335. device->pwrscale.devfreq_enabled = true;
  2336. device->pwrctrl.last_stat_updated = ktime_get();
  2337. kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
  2338. return 0;
  2339. }
  2340. static bool gen7_irq_pending(struct adreno_device *adreno_dev)
  2341. {
  2342. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2343. u32 status;
  2344. kgsl_regread(device, GEN7_RBBM_INT_0_STATUS, &status);
/* Return busy if an interrupt is pending */
  2346. return ((status & adreno_dev->irq_mask) ||
  2347. atomic_read(&adreno_dev->pending_irq_refcnt));
  2348. }
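/*
 * Take the GPU and GMU to SLUMBER: idle the GPU under an OOB vote, stop the
 * dispatcher and ringbuffers, and power off the GMU.
 */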
  2349. static int gen7_power_off(struct adreno_device *adreno_dev)
  2350. {
  2351. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2352. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2353. int ret;
  2354. WARN_ON(!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags));
  2355. adreno_suspend_context(device);
/*
 * adreno_suspend_context() unlocks the device mutex, which
 * could allow a concurrent thread to attempt the SLUMBER sequence.
 * Hence, check the flags again before proceeding with SLUMBER.
 */
  2361. if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2362. return 0;
  2363. kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
  2364. ret = gen7_gmu_oob_set(device, oob_gpu);
  2365. if (ret)
  2366. goto no_gx_power;
  2367. if (gen7_irq_pending(adreno_dev)) {
  2368. gen7_gmu_oob_clear(device, oob_gpu);
  2369. return -EBUSY;
  2370. }
  2371. kgsl_pwrscale_update_stats(device);
  2372. /* Save active coresight registers if applicable */
  2373. adreno_coresight_stop(adreno_dev);
  2374. adreno_irqctrl(adreno_dev, 0);
  2375. no_gx_power:
  2376. gen7_gmu_oob_clear(device, oob_gpu);
  2377. kgsl_pwrctrl_irq(device, false);
  2378. gen7_gmu_power_off(adreno_dev);
  2379. adreno_set_active_ctxs_null(adreno_dev);
  2380. adreno_dispatcher_stop(adreno_dev);
  2381. adreno_ringbuffer_stop(adreno_dev);
  2382. adreno_llcc_slice_deactivate(adreno_dev);
  2383. clear_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);
  2384. del_timer_sync(&device->idle_timer);
  2385. kgsl_pwrscale_sleep(device);
  2386. kgsl_pwrctrl_clear_l3_vote(device);
  2387. /*
  2388. * Reset the context records so that CP can start
  2389. * at the correct read pointer for BV thread after
  2390. * coming out of slumber.
  2391. */
  2392. gen7_reset_preempt_records(adreno_dev);
  2393. kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
  2394. return ret;
  2395. }
  2396. static void gmu_idle_check(struct work_struct *work)
  2397. {
  2398. struct kgsl_device *device = container_of(work,
  2399. struct kgsl_device, idle_check_ws);
  2400. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  2401. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2402. int ret;
  2403. mutex_lock(&device->mutex);
  2404. if (test_bit(GMU_DISABLE_SLUMBER, &device->gmu_core.flags))
  2405. goto done;
  2406. if (atomic_read(&device->active_cnt) || time_is_after_jiffies(device->idle_jiffies)) {
  2407. kgsl_pwrscale_update(device);
  2408. kgsl_start_idle_timer(device);
  2409. goto done;
  2410. }
  2411. if (!test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2412. goto done;
  2413. spin_lock(&device->submit_lock);
  2414. if (device->submit_now) {
  2415. spin_unlock(&device->submit_lock);
  2416. kgsl_pwrscale_update(device);
  2417. kgsl_start_idle_timer(device);
  2418. goto done;
  2419. }
  2420. device->skip_inline_submit = true;
  2421. spin_unlock(&device->submit_lock);
  2422. ret = gen7_power_off(adreno_dev);
  2423. if (ret == -EBUSY) {
  2424. kgsl_pwrscale_update(device);
  2425. kgsl_start_idle_timer(device);
  2426. }
  2427. done:
  2428. mutex_unlock(&device->mutex);
  2429. }
  2430. static int gen7_gmu_first_open(struct adreno_device *adreno_dev)
  2431. {
  2432. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2433. int ret;
/*
 * Do the one-time setup that needs to happen when we
 * attempt to boot the GPU for the very first time.
 */
  2438. ret = gen7_first_boot(adreno_dev);
  2439. if (ret)
  2440. return ret;
  2441. /*
  2442. * A client that does a first_open but never closes the device
  2443. * may prevent us from going back to SLUMBER. So trigger the idle
  2444. * check by incrementing the active count and immediately releasing it.
  2445. */
  2446. atomic_inc(&device->active_cnt);
  2447. gen7_gmu_active_count_put(adreno_dev);
  2448. return 0;
  2449. }
  2450. static int gen7_gmu_last_close(struct adreno_device *adreno_dev)
  2451. {
  2452. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2453. if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2454. return gen7_power_off(adreno_dev);
  2455. return 0;
  2456. }
  2457. static int gen7_gmu_active_count_get(struct adreno_device *adreno_dev)
  2458. {
  2459. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2460. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2461. int ret = 0;
  2462. if (WARN_ON(!mutex_is_locked(&device->mutex)))
  2463. return -EINVAL;
  2464. if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags))
  2465. return -EINVAL;
  2466. if ((atomic_read(&device->active_cnt) == 0) &&
  2467. !test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2468. ret = gen7_boot(adreno_dev);
  2469. if (ret == 0)
  2470. atomic_inc(&device->active_cnt);
  2471. trace_kgsl_active_count(device,
  2472. (unsigned long) __builtin_return_address(0));
  2473. return ret;
  2474. }
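/*
 * System suspend entry point: block new submissions, wait for the active
 * count to drain, idle the GPU and put the device into SLUMBER.
 */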
  2475. static int gen7_gmu_pm_suspend(struct adreno_device *adreno_dev)
  2476. {
  2477. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2478. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2479. int ret;
  2480. if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags))
  2481. return 0;
  2482. kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
  2483. /* Halt any new submissions */
  2484. reinit_completion(&device->halt_gate);
  2485. /* wait for active count so device can be put in slumber */
  2486. ret = kgsl_active_count_wait(device, 0, HZ);
  2487. if (ret) {
  2488. dev_err(device->dev,
  2489. "Timed out waiting for the active count\n");
  2490. goto err;
  2491. }
  2492. ret = adreno_idle(device);
  2493. if (ret)
  2494. goto err;
  2495. if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2496. gen7_power_off(adreno_dev);
  2497. set_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags);
  2498. adreno_get_gpu_halt(adreno_dev);
  2499. kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
  2500. return 0;
  2501. err:
  2502. adreno_dispatcher_start(device);
  2503. return ret;
  2504. }
  2505. static void gen7_gmu_pm_resume(struct adreno_device *adreno_dev)
  2506. {
  2507. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2508. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2509. if (WARN(!test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags),
  2510. "resume invoked without a suspend\n"))
  2511. return;
  2512. adreno_put_gpu_halt(adreno_dev);
  2513. adreno_dispatcher_start(device);
  2514. clear_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags);
  2515. }
  2516. static void gen7_gmu_touch_wakeup(struct adreno_device *adreno_dev)
  2517. {
  2518. struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
  2519. struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
  2520. int ret;
/*
 * Do not wake up a suspended device, and do not wake up before the first
 * boot sequence has completed.
 */
  2525. if (test_bit(GMU_PRIV_PM_SUSPEND, &gmu->flags) ||
  2526. !test_bit(GMU_PRIV_FIRST_BOOT_DONE, &gmu->flags))
  2527. return;
  2528. if (test_bit(GMU_PRIV_GPU_STARTED, &gmu->flags))
  2529. goto done;
  2530. kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
  2531. ret = gen7_gmu_boot(adreno_dev);
  2532. if (ret)
  2533. return;
  2534. ret = gen7_gpu_boot(adreno_dev);
  2535. if (ret)
  2536. return;
  2537. kgsl_pwrscale_wake(device);
  2538. set_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);
  2539. device->pwrctrl.last_stat_updated = ktime_get();
  2540. kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
  2541. done:
/*
 * When waking up from a touch event we want to stay active long enough
 * for the user to send a draw command. The default idle timer timeout
 * is shorter than we want, so push the idle timer out further for this
 * special case.
 */
  2548. mod_timer(&device->idle_timer, jiffies +
  2549. msecs_to_jiffies(adreno_wake_timeout));
  2550. }
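/*
 * Power ops for GMU-managed gen7 targets. These callbacks are plugged into
 * the adreno power framework; judging by the WARN_ON() in
 * gen7_gmu_active_count_get(), the core is expected to call the active count
 * ops with device->mutex held.
 */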
const struct adreno_power_ops gen7_gmu_power_ops = {
	.first_open = gen7_gmu_first_open,
	.last_close = gen7_gmu_last_close,
	.active_count_get = gen7_gmu_active_count_get,
	.active_count_put = gen7_gmu_active_count_put,
	.pm_suspend = gen7_gmu_pm_suspend,
	.pm_resume = gen7_gmu_pm_resume,
	.touch_wakeup = gen7_gmu_touch_wakeup,
	.gpu_clock_set = gen7_gmu_clock_set,
	.gpu_bus_set = gen7_gmu_bus_set,
};

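/*
 * Illustrative caller pattern for the active count ops above (a sketch, not
 * code taken from the KGSL core), assuming device->mutex is held as
 * gen7_gmu_active_count_get() requires:
 *
 *	mutex_lock(&device->mutex);
 *	ret = gen7_gmu_active_count_get(adreno_dev);
 *	if (!ret) {
 *		// ... touch hardware / queue work while the GPU stays awake ...
 *		gen7_gmu_active_count_put(adreno_dev);
 *	}
 *	mutex_unlock(&device->mutex);
 */
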
int gen7_gmu_device_probe(struct platform_device *pdev,
	u32 chipid, const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	struct gen7_device *gen7_dev;
	int ret;

	gen7_dev = devm_kzalloc(&pdev->dev, sizeof(*gen7_dev),
			GFP_KERNEL);
	if (!gen7_dev)
		return -ENOMEM;

	adreno_dev = &gen7_dev->adreno_dev;

	adreno_dev->irq_mask = GEN7_INT_MASK;

	ret = gen7_probe_common(pdev, adreno_dev, chipid, gpucore);
	if (ret)
		return ret;

	ret = adreno_dispatcher_init(adreno_dev);
	if (ret) {
		dev_err(&pdev->dev, "adreno dispatcher init failed ret %d\n", ret);
		return ret;
	}

	device = KGSL_DEVICE(adreno_dev);

	INIT_WORK(&device->idle_check_ws, gmu_idle_check);

	timer_setup(&device->idle_timer, gmu_idle_timer, 0);

	if (ADRENO_FEATURE(adreno_dev, ADRENO_DMS)) {
		set_bit(ADRENO_DEVICE_DMS, &adreno_dev->priv);
		adreno_dev->dms_enabled = true;
	}

	return 0;
}

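/*
 * Recovery path: quiesce GPU and GMU interrupts and HFI, hard reset the GMU
 * and GPU via gen7_gmu_suspend(), clear per-boot state (preemption records,
 * LLCC slices, the GPU_STARTED flag) and then attempt a fresh gen7_boot().
 */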
int gen7_gmu_reset(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);

	gen7_disable_gpu_irq(adreno_dev);

	gen7_gmu_irq_disable(adreno_dev);

	gen7_hfi_stop(adreno_dev);

	/* Hard reset the gmu and gpu */
	gen7_gmu_suspend(adreno_dev);

	gen7_reset_preempt_records(adreno_dev);

	adreno_llcc_slice_deactivate(adreno_dev);

	clear_bit(GMU_PRIV_GPU_STARTED, &gmu->flags);

	/* Attempt to reboot the gmu and gpu */
	return gen7_boot(adreno_dev);
}

int gen7_gmu_hfi_probe(struct adreno_device *adreno_dev)
{
	struct gen7_gmu_device *gmu = to_gen7_gmu(adreno_dev);
	struct gen7_hfi *hfi = &gmu->hfi;

	hfi->irq = kgsl_request_irq(gmu->pdev, "hfi",
		gen7_hfi_irq_handler, KGSL_DEVICE(adreno_dev));

	return hfi->irq < 0 ? hfi->irq : 0;
}

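/*
 * Register the gen7 device structure, the GMU log buffer and the HFI memory
 * with the minidump facility so that these regions are available for
 * post-mortem analysis if the system crashes.
 */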
int gen7_gmu_add_to_minidump(struct adreno_device *adreno_dev)
{
	struct gen7_device *gen7_dev = container_of(adreno_dev,
					struct gen7_device, adreno_dev);
	int ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_GEN7_DEVICE,
			(void *)(gen7_dev), sizeof(struct gen7_device));
	if (ret)
		return ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_GMU_LOG_ENTRY,
			gen7_dev->gmu.gmu_log->hostptr, gen7_dev->gmu.gmu_log->size);
	if (ret)
		return ret;

	ret = kgsl_add_va_to_minidump(adreno_dev->dev.dev, KGSL_HFIMEM_ENTRY,
			gen7_dev->gmu.hfi.hfi_mem->hostptr, gen7_dev->gmu.hfi.hfi_mem->size);

	return ret;
}

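/*
 * The GMU is a separate platform device tied to the KGSL device through the
 * component framework: bind() runs when the aggregate (master) device is
 * assembled, probing the GMU and, if the target provides one, its HFI layer;
 * unbind() tears both down in reverse order.
 */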
static int gen7_gmu_bind(struct device *dev, struct device *master, void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct gen7_gpudev *gen7_gpudev = to_gen7_gpudev(gpudev);
	int ret;

	ret = gen7_gmu_probe(device, to_platform_device(dev));
	if (ret)
		return ret;

	if (gen7_gpudev->hfi_probe) {
		ret = gen7_gpudev->hfi_probe(adreno_dev);

		if (ret) {
			gen7_gmu_remove(device);
			return ret;
		}
	}

	return 0;
}

static void gen7_gmu_unbind(struct device *dev, struct device *master,
		void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
	const struct gen7_gpudev *gen7_gpudev = to_gen7_gpudev(gpudev);

	if (gen7_gpudev->hfi_remove)
		gen7_gpudev->hfi_remove(adreno_dev);

	gen7_gmu_remove(device);
}

static const struct component_ops gen7_gmu_component_ops = {
	.bind = gen7_gmu_bind,
	.unbind = gen7_gmu_unbind,
};

static int gen7_gmu_probe_dev(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &gen7_gmu_component_ops);
}

static int gen7_gmu_remove_dev(struct platform_device *pdev)
{
	component_del(&pdev->dev, &gen7_gmu_component_ops);

	return 0;
}

static const struct of_device_id gen7_gmu_match_table[] = {
	{ .compatible = "qcom,gen7-gmu" },
	{ },
};

struct platform_driver gen7_gmu_driver = {
	.probe = gen7_gmu_probe_dev,
	.remove = gen7_gmu_remove_dev,
	.driver = {
		.name = "adreno-gen7-gmu",
		.of_match_table = gen7_gmu_match_table,
	},
};
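
/*
 * Probing is driven by devicetree: the platform driver matches a
 * "qcom,gen7-gmu" node and its probe only registers the component; the real
 * setup happens in gen7_gmu_bind(). A minimal illustrative node is sketched
 * below. Only the compatible string and the "hfi" interrupt name come from
 * this file; the node name, unit address and any other properties are
 * hypothetical placeholders.
 *
 *	gmu: gmu@0 {
 *		compatible = "qcom,gen7-gmu";
 *		interrupt-names = ..., "hfi";
 *		...
 *	};
 */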