// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/clk/qcom.h>
#include <linux/clk-provider.h>
#include <linux/firmware.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

#include "adreno.h"
#include "adreno_cp_parser.h"
#include "adreno_a3xx.h"
#include "adreno_pm4types.h"
#include "adreno_snapshot.h"
#include "adreno_trace.h"

/*
 * Define registers for a3xx that contain addresses used by the
 * cp parser logic
 */
const unsigned int a3xx_cp_addr_regs[ADRENO_CP_ADDR_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_0,
			A3XX_VSC_PIPE_DATA_ADDRESS_0),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_0,
			A3XX_VSC_PIPE_DATA_LENGTH_0),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_1,
			A3XX_VSC_PIPE_DATA_ADDRESS_1),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_1,
			A3XX_VSC_PIPE_DATA_LENGTH_1),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_2,
			A3XX_VSC_PIPE_DATA_ADDRESS_2),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_2,
			A3XX_VSC_PIPE_DATA_LENGTH_2),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_3,
			A3XX_VSC_PIPE_DATA_ADDRESS_3),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_3,
			A3XX_VSC_PIPE_DATA_LENGTH_3),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_4,
			A3XX_VSC_PIPE_DATA_ADDRESS_4),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_4,
			A3XX_VSC_PIPE_DATA_LENGTH_4),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_5,
			A3XX_VSC_PIPE_DATA_ADDRESS_5),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_5,
			A3XX_VSC_PIPE_DATA_LENGTH_5),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_6,
			A3XX_VSC_PIPE_DATA_ADDRESS_6),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_6,
			A3XX_VSC_PIPE_DATA_LENGTH_6),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_ADDRESS_7,
			A3XX_VSC_PIPE_DATA_ADDRESS_7),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_PIPE_DATA_LENGTH_7,
			A3XX_VSC_PIPE_DATA_LENGTH_7),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_0,
			A3XX_VFD_FETCH_INSTR_1_0),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_1,
			A3XX_VFD_FETCH_INSTR_1_1),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_2,
			A3XX_VFD_FETCH_INSTR_1_2),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_3,
			A3XX_VFD_FETCH_INSTR_1_3),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_4,
			A3XX_VFD_FETCH_INSTR_1_4),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_5,
			A3XX_VFD_FETCH_INSTR_1_5),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_6,
			A3XX_VFD_FETCH_INSTR_1_6),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_7,
			A3XX_VFD_FETCH_INSTR_1_7),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_8,
			A3XX_VFD_FETCH_INSTR_1_8),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_9,
			A3XX_VFD_FETCH_INSTR_1_9),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_10,
			A3XX_VFD_FETCH_INSTR_1_A),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_11,
			A3XX_VFD_FETCH_INSTR_1_B),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_12,
			A3XX_VFD_FETCH_INSTR_1_C),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_13,
			A3XX_VFD_FETCH_INSTR_1_D),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_14,
			A3XX_VFD_FETCH_INSTR_1_E),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VFD_FETCH_INSTR_1_15,
			A3XX_VFD_FETCH_INSTR_1_F),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_VSC_SIZE_ADDRESS,
			A3XX_VSC_SIZE_ADDRESS),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_VS_PVT_MEM_ADDR,
			A3XX_SP_VS_PVT_MEM_ADDR_REG),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_FS_PVT_MEM_ADDR,
			A3XX_SP_FS_PVT_MEM_ADDR_REG),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_VS_OBJ_START_REG,
			A3XX_SP_VS_OBJ_START_REG),
	ADRENO_REG_DEFINE(ADRENO_CP_ADDR_SP_FS_OBJ_START_REG,
			A3XX_SP_FS_OBJ_START_REG),
};
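
/*
 * Raw fragment shader instructions for the post-power-collapse workaround,
 * copied verbatim into the final CP_LOAD_STATE packet built by
 * _a3xx_pwron_fixup() below.
 */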
static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
	0x00000000, 0x302CC300, 0x00000000, 0x302CC304,
	0x00000000, 0x302CC308, 0x00000000, 0x302CC30C,
	0x00000000, 0x302CC310, 0x00000000, 0x302CC314,
	0x00000000, 0x302CC318, 0x00000000, 0x302CC31C,
	0x00000000, 0x302CC320, 0x00000000, 0x302CC324,
	0x00000000, 0x302CC328, 0x00000000, 0x302CC32C,
	0x00000000, 0x302CC330, 0x00000000, 0x302CC334,
	0x00000000, 0x302CC338, 0x00000000, 0x302CC33C,
	0x00000000, 0x00000400, 0x00020000, 0x63808003,
	0x00060004, 0x63828007, 0x000A0008, 0x6384800B,
	0x000E000C, 0x6386800F, 0x00120010, 0x63888013,
	0x00160014, 0x638A8017, 0x001A0018, 0x638C801B,
	0x001E001C, 0x638E801F, 0x00220020, 0x63908023,
	0x00260024, 0x63928027, 0x002A0028, 0x6394802B,
	0x002E002C, 0x6396802F, 0x00320030, 0x63988033,
	0x00360034, 0x639A8037, 0x003A0038, 0x639C803B,
	0x003E003C, 0x639E803F, 0x00000000, 0x00000400,
	0x00000003, 0x80D60003, 0x00000007, 0x80D60007,
	0x0000000B, 0x80D6000B, 0x0000000F, 0x80D6000F,
	0x00000013, 0x80D60013, 0x00000017, 0x80D60017,
	0x0000001B, 0x80D6001B, 0x0000001F, 0x80D6001F,
	0x00000023, 0x80D60023, 0x00000027, 0x80D60027,
	0x0000002B, 0x80D6002B, 0x0000002F, 0x80D6002F,
	0x00000033, 0x80D60033, 0x00000037, 0x80D60037,
	0x0000003B, 0x80D6003B, 0x0000003F, 0x80D6003F,
	0x00000000, 0x03000000, 0x00000000, 0x00000000,
};

#define A3XX_INT_MASK \
	((1 << A3XX_INT_RBBM_AHB_ERROR) | \
	 (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
	 (1 << A3XX_INT_CP_T0_PACKET_IN_IB) | \
	 (1 << A3XX_INT_CP_OPCODE_ERROR) | \
	 (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
	 (1 << A3XX_INT_CP_HW_FAULT) | \
	 (1 << A3XX_INT_CP_IB1_INT) | \
	 (1 << A3XX_INT_CP_IB2_INT) | \
	 (1 << A3XX_INT_CP_RB_INT) | \
	 (1 << A3XX_INT_CACHE_FLUSH_TS) | \
	 (1 << A3XX_INT_CP_REG_PROTECT_FAULT) | \
	 (1 << A3XX_INT_CP_AHB_ERROR_HALT) | \
	 (1 << A3XX_INT_UCHE_OOB_ACCESS))

/**
 * _a3xx_pwron_fixup() - Initialize a special command buffer to run a
 * post-power collapse shader workaround
 * @adreno_dev: Pointer to an adreno_device struct
 *
 * Some targets require a special workaround shader to be executed after
 * power collapse. Construct the IB once at init time and keep it handy.
 *
 * Return: 0 on success or negative on error
 */
static int _a3xx_pwron_fixup(struct adreno_device *adreno_dev)
{
	unsigned int *cmds;
	int count = ARRAY_SIZE(_a3xx_pwron_fixup_fs_instructions);

	/* Return if the fixup is already in place */
	if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
		return 0;

	adreno_dev->pwron_fixup = kgsl_allocate_global(KGSL_DEVICE(adreno_dev),
		PAGE_SIZE, 0, KGSL_MEMFLAGS_GPUREADONLY, 0, "pwron_fixup");
	if (IS_ERR(adreno_dev->pwron_fixup))
		return PTR_ERR(adreno_dev->pwron_fixup);

	cmds = adreno_dev->pwron_fixup->hostptr;
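
	/*
	 * The IB is built from CP type0 packets, which write one or more
	 * consecutive registers starting at a given offset, and CP type3
	 * packets, which carry an opcode followed by a payload of dwords.
	 */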
	*cmds++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	*cmds++ = 0x00000000;
	*cmds++ = 0x90000000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmds++ = A3XX_RBBM_CLOCK_CTL;
	*cmds++ = 0xFFFCFFFF;
	*cmds++ = 0x00010000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
	*cmds++ = 0x1E000150;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	*cmds++ = 0x1E000150;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
	*cmds++ = 0x1E000150;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_1_REG, 1);
	*cmds++ = 0x00000040;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_2_REG, 1);
	*cmds++ = 0x80000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_3_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_VS_CONTROL_REG, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_FS_CONTROL_REG, 1);
	*cmds++ = 0x0D001002;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_FSPRESV_RANGE_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_0_REG, 1);
	*cmds++ = 0x00401101;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_1_REG, 1);
	*cmds++ = 0x00000400;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_2_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_3_REG, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_4_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_5_REG, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_6_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_1_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_CONST_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG, 1);
	*cmds++ = 0x00000010;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_WG_OFFSET_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_SP_CTRL_REG, 1);
	*cmds++ = 0x00040000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
	*cmds++ = 0x0000000A;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG1, 1);
	*cmds++ = 0x00000001;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_PARAM_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_4, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_5, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_6, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_7, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_OFFSET_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_START_REG, 1);
	*cmds++ = 0x00000004;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_PARAM_REG, 1);
	*cmds++ = 0x04008001;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_ADDR_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_VS_LENGTH_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
	*cmds++ = 0x0DB0400A;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG1, 1);
	*cmds++ = 0x00300402;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_OFFSET_REG, 1);
	*cmds++ = 0x00010000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_START_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_PARAM_REG, 1);
	*cmds++ = 0x04008001;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_ADDR_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_OUTPUT_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_SP_FS_LENGTH_REG, 1);
	*cmds++ = 0x0000000D;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_CLIP_CNTL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_XOFFSET, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_XSCALE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_YOFFSET, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_YSCALE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_ZOFFSET, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_ZSCALE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X4, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y4, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z4, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W4, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X5, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y5, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z5, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W5, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SU_POINT_MINMAX, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SU_POINT_SIZE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SU_POLY_OFFSET_OFFSET, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SU_POLY_OFFSET_SCALE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SU_MODE_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SC_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SC_SCREEN_SCISSOR_TL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SC_SCREEN_SCISSOR_BR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SC_WINDOW_SCISSOR_BR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_TSE_DEBUG_ECO, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER1_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER2_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER3_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MODE_CONTROL, 1);
	*cmds++ = 0x00008000;
	*cmds++ = cp_type0_packet(A3XX_RB_RENDER_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MSAA_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_ALPHA_REFERENCE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_BLEND_RED, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_BLEND_GREEN, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_BLEND_BLUE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_BLEND_ALPHA, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW0, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW1, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW2, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW3, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_COPY_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_BASE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_PITCH, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_INFO, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_DEPTH_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_DEPTH_CLEAR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_DEPTH_BUF_INFO, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_DEPTH_BUF_PITCH, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_CLEAR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_BUF_INFO, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_BUF_PITCH, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_REF_MASK, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_STENCIL_REF_MASK_BF, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_LRZ_VSC_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_WINDOW_OFFSET, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_SAMPLE_COUNT_CONTROL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_SAMPLE_COUNT_ADDR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_Z_CLAMP_MIN, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_Z_CLAMP_MAX, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_GMEM_BASE_ADDR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_DEBUG_ECO_CONTROLS_ADDR, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER0_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER1_SELECT, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_RB_FRAME_BUFFER_DIMENSION, 1);
	*cmds++ = 0x00000000;
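
	/*
	 * Four CP_LOAD_STATE packets follow. In each, the first payload dword
	 * packs DSTOFFSET, STATESRC, STATEBLOCKID and NUMOFUNITS, and the
	 * second packs STATETYPE and EXTSRCADDR; the final packet streams the
	 * workaround shader instructions inline after its header.
	 */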
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
	*cmds++ = (1 << CP_LOADSTATE_DSTOFFSET_SHIFT) |
		(0 << CP_LOADSTATE_STATESRC_SHIFT) |
		(6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
		(1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT) |
		(0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
	*cmds++ = 0x00400000;
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
	*cmds++ = (2 << CP_LOADSTATE_DSTOFFSET_SHIFT) |
		(6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
		(1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT);
	*cmds++ = 0x00400220;
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
	*cmds++ = (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
		(1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT);
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 2 + count);
	*cmds++ = (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
		(13 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
	*cmds++ = 0x00000000;

	memcpy(cmds, _a3xx_pwron_fixup_fs_instructions, count << 2);
	cmds += count;

	*cmds++ = cp_type3_packet(CP_EXEC_CL, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
	*cmds++ = 0x1E000150;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
	*cmds++ = 0x1E000050;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
	*cmds++ = A3XX_RBBM_CLOCK_CTL;
	*cmds++ = 0xFFFCFFFF;
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	/*
	 * Remember the number of dwords in the command buffer for when we
	 * program the indirect buffer call in the ringbuffer
	 */
	adreno_dev->pwron_fixup_dwords =
		(cmds - (unsigned int *) adreno_dev->pwron_fixup->hostptr);

	/* Mark the flag in ->priv to show that we have the fix */
	set_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);

	return 0;
}

static int a3xx_probe(struct platform_device *pdev,
		u32 chipid, const struct adreno_gpu_core *gpucore)
{
	struct adreno_device *adreno_dev;
	struct kgsl_device *device;
	int ret;

	adreno_dev = (struct adreno_device *)
		of_device_get_match_data(&pdev->dev);

	memset(adreno_dev, 0, sizeof(*adreno_dev));

	adreno_dev->gpucore = gpucore;
	adreno_dev->chipid = chipid;

	adreno_reg_offset_init(gpucore->gpudev->reg_offsets);

	device = KGSL_DEVICE(adreno_dev);

	timer_setup(&device->idle_timer, kgsl_timer, 0);

	INIT_WORK(&device->idle_check_ws, kgsl_idle_check);

	adreno_dev->irq_mask = A3XX_INT_MASK;

	ret = adreno_device_probe(pdev, adreno_dev);
	if (ret)
		return ret;

	a3xx_coresight_init(adreno_dev);

	return adreno_dispatcher_init(adreno_dev);
}

static int a3xx_send_me_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int *cmds;
	int ret;

	cmds = adreno_ringbuffer_allocspace(rb, 18);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	*cmds++ = cp_type3_packet(CP_ME_INIT, 17);
	*cmds++ = 0x000003f7;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000080;
	*cmds++ = 0x00000100;
	*cmds++ = 0x00000180;
	*cmds++ = 0x00006600;
	*cmds++ = 0x00000150;
	*cmds++ = 0x0000014e;
	*cmds++ = 0x00000154;
	*cmds++ = 0x00000001;
	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* Enable protected mode registers for A3XX */
	*cmds++ = 0x20000000;

	*cmds++ = 0x00000000;
	*cmds++ = 0x00000000;

	/* Submit the command to the ringbuffer */
	kgsl_pwrscale_busy(device);
	kgsl_regwrite(device, A3XX_CP_RB_WPTR, rb->_wptr);
	rb->wptr = rb->_wptr;

	ret = adreno_spin_idle(adreno_dev, 2000);
	if (ret) {
		dev_err(device->dev, "CP initialization failed to idle\n");
		kgsl_device_snapshot(device, NULL, NULL, false);
	}

	return ret;
}

static void a3xx_microcode_load(struct adreno_device *adreno_dev);

static int a3xx_rb_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);

	memset(rb->buffer_desc->hostptr, 0xaa, KGSL_RB_SIZE);
	rb->wptr = 0;
	rb->_wptr = 0;
	rb->wptr_preempt_end = ~0;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2).
	 * Also disable the host RPTR shadow register as it might be unreliable
	 * in certain circumstances.
	 */
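	/*
	 * As a worked example, assuming a 32 KB ring: KGSL_RB_DWORDS would be
	 * 8192, 8192 >> 1 = 4096 quadwords, and ilog2(4096) = 12 is what lands
	 * in the size field below.
	 */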
	kgsl_regwrite(device, A3XX_CP_RB_CNTL,
		(ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
		(1 << 27));

	kgsl_regwrite(device, A3XX_CP_RB_BASE, rb->buffer_desc->gpuaddr);

	a3xx_microcode_load(adreno_dev);

	/* clear ME_HALT to start micro engine */
	kgsl_regwrite(device, A3XX_CP_ME_CNTL, 0);

	return a3xx_send_me_init(adreno_dev, rb);
}

/*
 * a3xx soft fault detection
 *
 * a3xx targets do not have hardware fault detection, so we need to do it the
 * old-fashioned way: periodically read a set of registers and counters and
 * check that they are advancing. There are six registers and four 64-bit
 * counters that we keep an eye on.
 */
#define A3XX_SOFT_FAULT_DETECT_REGS 6
#define A3XX_SOFT_FAULT_DETECT_COUNTERS 4
#define A3XX_SOFT_FAULT_DETECT_COUNT \
	(A3XX_SOFT_FAULT_DETECT_REGS + (A3XX_SOFT_FAULT_DETECT_COUNTERS * 2))
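
/*
 * A3XX_SOFT_FAULT_DETECT_COUNT therefore works out to 14 tracked values: six
 * register snapshots plus the lo and hi halves of the four 64-bit counters.
 */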

static bool a3xx_soft_fault_detect_isidle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 reg;

	if (kgsl_state_is_awake(device)) {
		if (!adreno_rb_empty(adreno_dev->cur_rb))
			return false;

		/* only check rbbm status to determine if GPU is idle */
		kgsl_regread(device, A3XX_RBBM_STATUS, &reg);
		if (reg & 0x7ffffffe)
			return false;
	}

	memset(adreno_dev->soft_ft_vals, 0, A3XX_SOFT_FAULT_DETECT_COUNT << 2);
	return true;
}

/* Read the fault detect registers and compare them to the stored version */
static int a3xx_soft_fault_detect_read_compare(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
	int i, ret = 0;
	unsigned int ts;

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
		return 1;

	/* Check to see if the device is idle - if so report no hang */
	if (a3xx_soft_fault_detect_isidle(adreno_dev))
		ret = 1;

	for (i = 0; i < A3XX_SOFT_FAULT_DETECT_COUNT; i++) {
		unsigned int val;

		if (!adreno_dev->soft_ft_regs[i])
			continue;

		kgsl_regread(device, adreno_dev->soft_ft_regs[i], &val);
		if (val != adreno_dev->soft_ft_vals[i])
			ret = 1;

		adreno_dev->soft_ft_vals[i] = val;
	}

	if (!adreno_rb_readtimestamp(adreno_dev, adreno_dev->cur_rb,
				KGSL_TIMESTAMP_RETIRED, &ts)) {
		if (ts != rb->fault_detect_ts)
			ret = 1;

		rb->fault_detect_ts = ts;
	}

	return ret;
}

/*
 * This is called on a regular basis while cmdobjs are inflight. Fault
 * detection registers are read and compared to the existing values - if they
 * changed then the GPU is still running. If they are the same between
 * subsequent calls then the GPU may have faulted.
 */
static void a3xx_soft_fault_timer(struct timer_list *t)
{
	struct adreno_dispatcher *dispatcher = from_timer(dispatcher,
		t, fault_timer);
	struct adreno_device *adreno_dev = container_of(dispatcher,
		struct adreno_device, dispatcher);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Leave if the user decided to turn off fast hang detection */
	if (!adreno_soft_fault_detect(adreno_dev))
		return;

	if (adreno_gpu_fault(adreno_dev)) {
		adreno_dispatcher_schedule(device);
		return;
	}

	/*
	 * Read the fault registers - if the compare returns 0 then they
	 * haven't changed, so mark the dispatcher as faulted and schedule the
	 * work loop.
	 */
	if (!a3xx_soft_fault_detect_read_compare(adreno_dev))
		adreno_dispatcher_fault(adreno_dev, ADRENO_SOFT_FAULT);
	else if (dispatcher->inflight > 0)
		adreno_dispatcher_start_fault_timer(adreno_dev);
}

/*
 * Start fault detection. The counters are only assigned while fault detection
 * is running so that they can be used for other purposes if fault detection
 * is disabled.
 */
static void a3xx_soft_fault_detect_start(struct adreno_device *adreno_dev)
{
	u32 *regs = &adreno_dev->soft_ft_regs[A3XX_SOFT_FAULT_DETECT_COUNTERS];
	int ret = 0;

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
		return;

	if (adreno_dev->fast_hang_detect == 1)
		return;

	ret |= adreno_perfcounter_kernel_get(adreno_dev,
		KGSL_PERFCOUNTER_GROUP_SP, SP_ALU_ACTIVE_CYCLES,
		&regs[0], &regs[1]);
	ret |= adreno_perfcounter_kernel_get(adreno_dev,
		KGSL_PERFCOUNTER_GROUP_SP, SP0_ICL1_MISSES,
		&regs[2], &regs[3]);
	ret |= adreno_perfcounter_kernel_get(adreno_dev,
		KGSL_PERFCOUNTER_GROUP_SP, SP_FS_CFLOW_INSTRUCTIONS,
		&regs[4], &regs[5]);
	ret |= adreno_perfcounter_kernel_get(adreno_dev,
		KGSL_PERFCOUNTER_GROUP_TSE, TSE_INPUT_PRIM_NUM,
		&regs[6], &regs[7]);

	WARN(ret, "Unable to allocate one or more fault detect counters\n");
	adreno_dev->fast_hang_detect = 1;
}

/* Helper function to put back a counter */
static void put_counter(struct adreno_device *adreno_dev,
		int group, int countable, u32 *lo, u32 *hi)
{
	adreno_perfcounter_put(adreno_dev, group, countable,
		PERFCOUNTER_FLAG_KERNEL);

	*lo = 0;
	*hi = 0;
}

/* Stop fault detection and return the counters */
static void a3xx_soft_fault_detect_stop(struct adreno_device *adreno_dev)
{
	u32 *regs = &adreno_dev->soft_ft_regs[A3XX_SOFT_FAULT_DETECT_COUNTERS];

	if (!test_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv))
		return;

	if (!adreno_dev->fast_hang_detect)
		return;

	put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP, SP_ALU_ACTIVE_CYCLES,
		&regs[0], &regs[1]);
	put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP, SP0_ICL1_MISSES,
		&regs[2], &regs[3]);
	put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
		SP_FS_CFLOW_INSTRUCTIONS, &regs[4], &regs[5]);
	put_counter(adreno_dev, KGSL_PERFCOUNTER_GROUP_TSE, TSE_INPUT_PRIM_NUM,
		&regs[6], &regs[7]);

	adreno_dev->fast_hang_detect = 0;
}

/* Initialize the registers and set up the data structures */
static void a3xx_soft_fault_detect_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!ADRENO_FEATURE(adreno_dev, ADRENO_SOFT_FAULT_DETECT))
		return;

	/* Disable the fast hang detect bit until we know it's a go */
	adreno_dev->fast_hang_detect = 0;

	adreno_dev->soft_ft_regs = devm_kcalloc(&device->pdev->dev,
		A3XX_SOFT_FAULT_DETECT_COUNT, sizeof(u32), GFP_KERNEL);
	adreno_dev->soft_ft_vals = devm_kcalloc(&device->pdev->dev,
		A3XX_SOFT_FAULT_DETECT_COUNT, sizeof(u32), GFP_KERNEL);

	if (!adreno_dev->soft_ft_regs || !adreno_dev->soft_ft_vals)
		return;

	adreno_dev->soft_ft_count = A3XX_SOFT_FAULT_DETECT_COUNT;

	adreno_dev->soft_ft_regs[0] = A3XX_RBBM_STATUS;
	adreno_dev->soft_ft_regs[1] = A3XX_CP_RB_RPTR;
	adreno_dev->soft_ft_regs[2] = A3XX_CP_IB1_BASE;
	adreno_dev->soft_ft_regs[3] = A3XX_CP_IB1_BUFSZ;
	adreno_dev->soft_ft_regs[4] = A3XX_CP_IB2_BASE;
	adreno_dev->soft_ft_regs[5] = A3XX_CP_IB2_BUFSZ;

	set_bit(ADRENO_DEVICE_SOFT_FAULT_DETECT, &adreno_dev->priv);

	a3xx_soft_fault_detect_start(adreno_dev);
}

static void a3xx_remove(struct adreno_device *adreno_dev)
{
	a3xx_soft_fault_detect_stop(adreno_dev);
}

static int a3xx_microcode_read(struct adreno_device *adreno_dev);

/*
 * a3xx_init() - Initialize gpu specific data
 * @adreno_dev: Pointer to adreno device
 */
static int a3xx_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
	struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
	int ret;

	/*
	 * Set up the a3xx-only soft fault timer before heading into the
	 * generic dispatcher setup
	 */
	if (ADRENO_FEATURE(adreno_dev, ADRENO_SOFT_FAULT_DETECT))
		timer_setup(&dispatcher->fault_timer, a3xx_soft_fault_timer, 0);

	ret = a3xx_ringbuffer_init(adreno_dev);
	if (ret)
		return ret;

	ret = a3xx_microcode_read(adreno_dev);
	if (ret)
		return ret;

	_a3xx_pwron_fixup(adreno_dev);

	ret = adreno_allocate_global(device, &iommu->setstate, PAGE_SIZE,
		0, KGSL_MEMFLAGS_GPUREADONLY, 0, "setstate");
	if (!ret)
		kgsl_sharedmem_writel(iommu->setstate,
			KGSL_IOMMU_SETSTATE_NOP_OFFSET,
			cp_type3_packet(CP_NOP, 1));

	kgsl_mmu_set_feature(device, KGSL_MMU_NEED_GUARD_PAGE);

	/* Put the hardware in a responsive state to set up fault detection */
	ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_AWARE);
	if (ret)
		return ret;

	a3xx_soft_fault_detect_init(adreno_dev);

	kgsl_pwrctrl_change_state(device, KGSL_STATE_SLUMBER);

	return 0;
}

/*
 * a3xx_err_callback() - Callback for a3xx error interrupts
 * @adreno_dev: Pointer to device
 * @bit: Interrupt bit
 */
static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int reg;

	switch (bit) {
	case A3XX_INT_RBBM_AHB_ERROR: {
		kgsl_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);

		/*
		 * Return the word address of the erroring register so that it
		 * matches the register specification
		 */
		dev_crit_ratelimited(device->dev,
			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
			reg & (1 << 28) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2,
			(reg >> 20) & 0x3,
			(reg >> 24) & 0xF);

		/* Clear the error */
		kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
		break;
	}
	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
		dev_crit_ratelimited(device->dev,
			"RBBM: ATB bus overflow\n");
		break;
	case A3XX_INT_CP_T0_PACKET_IN_IB:
		dev_crit_ratelimited(device->dev,
			"ringbuffer T0 packet in IB interrupt\n");
		break;
	case A3XX_INT_CP_OPCODE_ERROR:
		dev_crit_ratelimited(device->dev,
			"ringbuffer opcode error interrupt\n");
		break;
	case A3XX_INT_CP_RESERVED_BIT_ERROR:
		dev_crit_ratelimited(device->dev,
			"ringbuffer reserved bit error interrupt\n");
		break;
	case A3XX_INT_CP_HW_FAULT:
		kgsl_regread(device, A3XX_CP_HW_FAULT, &reg);
		dev_crit_ratelimited(device->dev,
			"CP | Ringbuffer HW fault | status=%x\n",
			reg);
		break;
	case A3XX_INT_CP_REG_PROTECT_FAULT:
		kgsl_regread(device, A3XX_CP_PROTECT_STATUS, &reg);
		dev_crit_ratelimited(device->dev,
			"CP | Protected mode error | %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2);
		break;
	case A3XX_INT_CP_AHB_ERROR_HALT:
		dev_crit_ratelimited(device->dev,
			"ringbuffer AHB error interrupt\n");
		break;
	case A3XX_INT_UCHE_OOB_ACCESS:
		dev_crit_ratelimited(device->dev,
			"UCHE: Out of bounds access\n");
		break;
	default:
		dev_crit_ratelimited(device->dev, "Unknown interrupt\n");
	}
}
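
/*
 * Per-bit callbacks for the 32 RBBM interrupt status bits. Bits 26-31 have
 * no handlers on a3xx, so their entries are left default-initialized (NULL).
 */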
static const struct adreno_irq_funcs a3xx_irq_funcs[32] = {
	ADRENO_IRQ_CALLBACK(NULL),               /* 0 - RBBM_GPU_IDLE */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 1 - RBBM_AHB_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),               /* 2 - RBBM_REG_TIMEOUT */
	ADRENO_IRQ_CALLBACK(NULL),               /* 3 - RBBM_ME_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(NULL),               /* 4 - RBBM_PFP_MS_TIMEOUT */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 5 - RBBM_ATB_BUS_OVERFLOW */
	ADRENO_IRQ_CALLBACK(NULL),               /* 6 - RBBM_VFD_ERROR */
	ADRENO_IRQ_CALLBACK(NULL),               /* 7 - CP_SW */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 8 - CP_T0_PACKET_IN_IB */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 9 - CP_OPCODE_ERROR */
	/* 10 - CP_RESERVED_BIT_ERROR */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 11 - CP_HW_FAULT */
	ADRENO_IRQ_CALLBACK(NULL),               /* 12 - CP_DMA */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
	/* 16 - CP_REG_PROTECT_FAULT */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL),               /* 17 - CP_RB_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL),               /* 18 - CP_VS_DONE_TS */
	ADRENO_IRQ_CALLBACK(NULL),               /* 19 - CP_PS_DONE_TS */
	ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 20 - CP_CACHE_FLUSH_TS */
	/* 21 - CP_AHB_ERROR_FAULT */
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),
	ADRENO_IRQ_CALLBACK(NULL),               /* 22 - Unused */
	ADRENO_IRQ_CALLBACK(NULL),               /* 23 - Unused */
	/* 24 - MISC_HANG_DETECT */
	ADRENO_IRQ_CALLBACK(adreno_hang_int_callback),
	ADRENO_IRQ_CALLBACK(a3xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
};

static struct {
	u32 reg;
	u32 base;
	u32 count;
} a3xx_protected_blocks[] = {
	/* RBBM */
	{ A3XX_CP_PROTECT_REG_0,      0x0018, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 1,  0x0020, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 2,  0x0033, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 3,  0x0042, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 4,  0x0050, 4 },
	{ A3XX_CP_PROTECT_REG_0 + 5,  0x0063, 0 },
	{ A3XX_CP_PROTECT_REG_0 + 6,  0x0100, 4 },
	/* CP */
	{ A3XX_CP_PROTECT_REG_0 + 7,  0x01c0, 5 },
	{ A3XX_CP_PROTECT_REG_0 + 8,  0x01ec, 1 },
	{ A3XX_CP_PROTECT_REG_0 + 9,  0x01f6, 1 },
	{ A3XX_CP_PROTECT_REG_0 + 10, 0x01f8, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 11, 0x045e, 2 },
	{ A3XX_CP_PROTECT_REG_0 + 12, 0x0460, 4 },
	/* RB */
	{ A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
	/* VBIF */
	{ A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
	/*
	 * SMMU
	 * For A3xx, the base offset of the smmu region is 0xa000 and its
	 * length is 0x1000 bytes. The offset must be given in dwords and the
	 * length of the block as ilog2(dword length):
	 * 0xa000 >> 2 = 0x2800, ilog2(0x1000 >> 2) = 10.
	 */
	{ A3XX_CP_PROTECT_REG_0 + 15, 0x2800, 10 },
	/* There are no remaining protected mode registers for a3xx */
};
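
/*
 * Each CP_PROTECT register appears to encode its region as 0x60000000
 * (the protection enable bits) | count << 24 (ilog2 of the region length in
 * dwords) | base << 2 (the dword base converted to a byte address), which is
 * how val is assembled from the table above.
 */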
static void a3xx_protect_init(struct kgsl_device *device)
{
	int i;

	kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

	for (i = 0; i < ARRAY_SIZE(a3xx_protected_blocks); i++) {
		u32 val = 0x60000000 |
			(a3xx_protected_blocks[i].count << 24) |
			(a3xx_protected_blocks[i].base << 2);

		kgsl_regwrite(device, a3xx_protected_blocks[i].reg, val);
	}
}

bool a3xx_gx_is_on(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	bool gdsc_on, clk_on;

	clk_on = __clk_is_enabled(pwr->grp_clks[0]);
	gdsc_on = regulator_is_enabled(pwr->gx_gdsc);

	return (gdsc_on && clk_on);
}

static int a3xx_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	const struct adreno_a3xx_core *a3xx_core = to_a3xx_core(adreno_dev);
	int ret;

	ret = kgsl_mmu_start(device);
	if (ret)
		return ret;

	adreno_get_bus_counters(adreno_dev);

	adreno_perfcounter_restore(adreno_dev);

	if (adreno_dev->soft_ft_regs)
		memset(adreno_dev->soft_ft_regs, 0,
			adreno_dev->soft_ft_count << 2);

	/* Set up VBIF registers from the GPU core definition */
	kgsl_regmap_multi_write(&device->regmap, a3xx_core->vbif,
		a3xx_core->vbif_count);

	/* Make all blocks contribute to the GPU BUSY perf counter */
	kgsl_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);

	/* Tune the hysteresis counters for SP and CP idle detection */
	kgsl_regwrite(device, A3XX_RBBM_SP_HYST_CNT, 0x10);
	kgsl_regwrite(device, A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/*
	 * Enable the RBBM error reporting bits. This lets us get
	 * useful information on failure
	 */
	kgsl_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	kgsl_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);

	/* Turn on the power counters */
	kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00030000);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang
	 */
	kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
		(1 << 16) | 0xFFF);

	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
	kgsl_regwrite(device, A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable VFD to access most of the UCHE (7 ways out of 8) */
	kgsl_regwrite(device, A3XX_UCHE_CACHE_WAYS_VFD, 0x07);

	/* Enable clock gating */
	kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL_DEFAULT);

	/* Turn on protection */
	a3xx_protect_init(device);

	/* Turn on performance counters */
	kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);

	kgsl_regwrite(device, A3XX_CP_DEBUG, A3XX_CP_DEBUG_DEFAULT);

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	kgsl_regwrite(device, A3XX_CP_QUEUE_THRESHOLDS, 0x000E0602);

	return 0;
}

/* Register offset defines for A3XX */
static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A3XX_CP_RB_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A3XX_CP_RB_RPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A3XX_CP_RB_WPTR),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A3XX_CP_ME_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A3XX_CP_RB_CNTL),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A3XX_CP_IB1_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE_HI, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A3XX_CP_IB1_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A3XX_CP_IB2_BASE),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE_HI, ADRENO_REG_SKIP),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A3XX_CP_IB2_BUFSZ),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, A3XX_CP_SCRATCH_REG0),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_SCRATCH_REG6, A3XX_CP_SCRATCH_REG6),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_SCRATCH_REG7, A3XX_CP_SCRATCH_REG7),
	ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_REG_0, A3XX_CP_PROTECT_REG_0),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A3XX_RBBM_STATUS),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
			A3XX_RBBM_PERFCTR_PWR_1_LO),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A3XX_RBBM_INT_0_MASK),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL),
	ADRENO_REG_DEFINE(ADRENO_REG_PA_SC_AA_CONFIG, A3XX_PA_SC_AA_CONFIG),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PM_OVERRIDE2, A3XX_RBBM_PM_OVERRIDE2),
	ADRENO_REG_DEFINE(ADRENO_REG_SQ_GPR_MANAGEMENT, A3XX_SQ_GPR_MANAGEMENT),
	ADRENO_REG_DEFINE(ADRENO_REG_SQ_INST_STORE_MANAGEMENT,
			A3XX_SQ_INST_STORE_MANAGEMENT),
	ADRENO_REG_DEFINE(ADRENO_REG_TP0_CHICKEN, A3XX_TP0_CHICKEN),
	ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A3XX_RBBM_SW_RESET_CMD),
};

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
		void **buf, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, &device->pdev->dev);
	if (ret) {
		dev_err(&device->pdev->dev, "request_firmware(%s) failed: %d\n",
			fwfile, ret);
		return ret;
	}

	if (!fw)
		return -EINVAL;

	*buf = devm_kmemdup(&device->pdev->dev, fw->data, fw->size, GFP_KERNEL);
	*len = fw->size;

	release_firmware(fw);
	return (*buf) ? 0 : -ENOMEM;
}

static int a3xx_microcode_read(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_firmware *pm4_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PM4);
	struct adreno_firmware *pfp_fw = ADRENO_FW(adreno_dev, ADRENO_FW_PFP);
	const struct adreno_a3xx_core *a3xx_core = to_a3xx_core(adreno_dev);

	if (pm4_fw->fwvirt == NULL) {
		int len;
		void *ptr;

		int ret = _load_firmware(device,
			a3xx_core->pm4fw_name, &ptr, &len);

		if (ret) {
			dev_err(device->dev, "Failed to read pm4 ucode %s\n",
				a3xx_core->pm4fw_name);
			return ret;
		}

		/* The PM4 size should be 3-dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			dev_err(device->dev,
				"Bad pm4 microcode size: %d\n",
				len);
			kfree(ptr);
			return -ENOMEM;
		}

		pm4_fw->size = len / sizeof(uint32_t);
		pm4_fw->fwvirt = ptr;
		pm4_fw->version = pm4_fw->fwvirt[1];
	}

	if (pfp_fw->fwvirt == NULL) {
		int len;
		void *ptr;

		int ret = _load_firmware(device,
			a3xx_core->pfpfw_name, &ptr, &len);
		if (ret) {
			dev_err(device->dev, "Failed to read pfp ucode %s\n",
				a3xx_core->pfpfw_name);
			return ret;
		}

		/* The PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			dev_err(device->dev,
				"Bad PFP microcode size: %d\n",
				len);
			kfree(ptr);
			return -ENOMEM;
		}

		pfp_fw->size = len / sizeof(uint32_t);
		pfp_fw->fwvirt = ptr;
		pfp_fw->version = pfp_fw->fwvirt[1];
	}

	return 0;
}

static void a3xx_microcode_load(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	size_t pm4_size = adreno_dev->fw[ADRENO_FW_PM4].size;
	size_t pfp_size = adreno_dev->fw[ADRENO_FW_PFP].size;
	int i;
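
	/*
	 * Dword 0 of each firmware image appears to be a metadata header
	 * rather than ucode, which is presumably why the copy loops below
	 * start at index 1 (dword 1 also serves as the version number read
	 * in a3xx_microcode_read()).
	 */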
	/* load the CP ucode using AHB writes */
	kgsl_regwrite(device, A3XX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < pm4_size; i++)
		kgsl_regwrite(device, A3XX_CP_ME_RAM_DATA,
			adreno_dev->fw[ADRENO_FW_PM4].fwvirt[i]);

	kgsl_regwrite(device, A3XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < pfp_size; i++)
		kgsl_regwrite(device, A3XX_CP_PFP_UCODE_DATA,
			adreno_dev->fw[ADRENO_FW_PFP].fwvirt[i]);
}

static u64 a3xx_read_alwayson(struct adreno_device *adreno_dev)
{
	/* A3XX does not have an always-on timer */
	return 0;
}

static irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	irqreturn_t ret;
	u32 status;

	/* Get the current interrupt status */
	kgsl_regread(device, A3XX_RBBM_INT_0_STATUS, &status);

	/*
	 * Clear all the interrupt bits except A3XX_INT_RBBM_AHB_ERROR. That
	 * interrupt will stay asserted until it is cleared by the handler, so
	 * don't touch it yet to avoid a storm. Note that the constant is a
	 * bit index, so it must go through BIT() to become a mask.
	 */
	kgsl_regwrite(device, A3XX_RBBM_INT_CLEAR_CMD,
		status & ~BIT(A3XX_INT_RBBM_AHB_ERROR));

	/* Call the helper to execute the callbacks */
	ret = adreno_irq_callbacks(adreno_dev, a3xx_irq_funcs, status);

	trace_kgsl_a3xx_irq_status(adreno_dev, status);

	/* Now clear AHB_ERROR if it was set */
	if (status & BIT(A3XX_INT_RBBM_AHB_ERROR))
		kgsl_regwrite(device, A3XX_RBBM_INT_CLEAR_CMD,
			BIT(A3XX_INT_RBBM_AHB_ERROR));

	return ret;
}

static bool a3xx_hw_isidle(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 status;

	kgsl_regread(device, A3XX_RBBM_STATUS, &status);
	if (status & 0x7ffffffe)
		return false;

	kgsl_regread(device, A3XX_RBBM_INT_0_STATUS, &status);

	/* Return busy if an interrupt is pending */
	return !((status & adreno_dev->irq_mask) ||
		atomic_read(&adreno_dev->pending_irq_refcnt));
}

static int a3xx_clear_pending_transactions(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 mask = A30X_VBIF_XIN_HALT_CTRL0_MASK;
	int ret;
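
	/* Request a VBIF halt, wait for the hardware to ack it, then resume */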
	kgsl_regwrite(device, A3XX_VBIF_XIN_HALT_CTRL0, mask);
	ret = adreno_wait_for_halt_ack(device, A3XX_VBIF_XIN_HALT_CTRL1, mask);
	kgsl_regwrite(device, A3XX_VBIF_XIN_HALT_CTRL0, 0);

	return ret;
}

static bool a3xx_is_hw_collapsible(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/*
	 * Skip power collapse for A304 if the power ctrl flag is set to a
	 * non-zero value. soft_reset does not work on A304, so power collapse
	 * must be disabled to avoid triggering a soft_reset.
	 */
	if (adreno_is_a304(adreno_dev) && device->pwrctrl.ctrl_flags)
		return false;

	return adreno_isidle(adreno_dev);
}

static void a3xx_power_stats(struct adreno_device *adreno_dev,
		struct kgsl_power_stats *stats)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_busy_data *busy = &adreno_dev->busy_data;
	s64 freq = kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000;
	u64 gpu_busy;
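
	/*
	 * gpu_busy is a delta in GPU cycles and freq is in MHz (cycles per
	 * microsecond), so the busy_time computed below is in microseconds.
	 */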
	/* Set the GPU busy counter for frequency scaling */
	gpu_busy = counter_delta(device, A3XX_RBBM_PERFCTR_PWR_1_LO,
		&busy->gpu_busy);

	stats->busy_time = gpu_busy / freq;

	if (!device->pwrctrl.bus_control)
		return;

	stats->ram_time = counter_delta(device, adreno_dev->ram_cycles_lo,
		&busy->bif_ram_cycles);

	stats->ram_wait = counter_delta(device, adreno_dev->starved_ram_lo,
		&busy->bif_starved_ram);
}

static int a3xx_setproperty(struct kgsl_device_private *dev_priv,
		u32 type, void __user *value, u32 sizebytes)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	u32 enable;

	if (type != KGSL_PROP_PWRCTRL)
		return -ENODEV;

	if (sizebytes != sizeof(enable))
		return -EINVAL;

	if (copy_from_user(&enable, value, sizeof(enable)))
		return -EFAULT;

	mutex_lock(&device->mutex);

	if (enable) {
		device->pwrctrl.ctrl_flags = 0;

		if (!adreno_active_count_get(adreno_dev)) {
			a3xx_soft_fault_detect_start(adreno_dev);
			adreno_active_count_put(adreno_dev);
		}

		kgsl_pwrscale_enable(device);
	} else {
		kgsl_pwrctrl_change_state(device, KGSL_STATE_ACTIVE);
		device->pwrctrl.ctrl_flags = KGSL_PWR_ON;

		a3xx_soft_fault_detect_stop(adreno_dev);
		kgsl_pwrscale_disable(device, true);
	}

	mutex_unlock(&device->mutex);

	return 0;
}

const struct adreno_gpudev adreno_a3xx_gpudev = {
	.reg_offsets = a3xx_register_offsets,
	.irq_handler = a3xx_irq_handler,
	.probe = a3xx_probe,
	.rb_start = a3xx_rb_start,
	.init = a3xx_init,
	.start = a3xx_start,
	.snapshot = a3xx_snapshot,
	.read_alwayson = a3xx_read_alwayson,
	.hw_isidle = a3xx_hw_isidle,
	.power_ops = &adreno_power_operations,
	.clear_pending_transactions = a3xx_clear_pending_transactions,
	.ringbuffer_submitcmd = a3xx_ringbuffer_submitcmd,
	.is_hw_collapsible = a3xx_is_hw_collapsible,
	.power_stats = a3xx_power_stats,
	.setproperty = a3xx_setproperty,
	.remove = a3xx_remove,
	.gx_is_on = a3xx_gx_is_on,
};