adreno_a3xx_ringbuffer.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_a3xx.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_trace.h"
#include "kgsl_trace.h"
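
/*
 * Build a CP_WAIT_REG_EQ packet: the CP polls @addr until
 * (register value & @mask) == @val, with @interval controlling how the
 * poll is retried (exact retry semantics are the CP's).
 * Returns the number of dwords written (always 5).
 */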
static int a3xx_wait_reg(unsigned int *cmds, unsigned int addr,
		unsigned int val, unsigned int mask,
		unsigned int interval)
{
	cmds[0] = cp_type3_packet(CP_WAIT_REG_EQ, 4);
	cmds[1] = addr;
	cmds[2] = val;
	cmds[3] = mask;
	cmds[4] = interval;

	return 5;
}
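
/*
 * Build the command stream that stalls VBIF -> DDR traffic: wait for
 * pending WFIs to drain, set the recoverable halt bit with CP_REG_RMW,
 * then wait for the halt acknowledgment. Used to quiesce memory traffic
 * before the TTBR0 registers are reprogrammed. Returns the number of
 * dwords written.
 */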
static int a3xx_vbif_lock(unsigned int *cmds)
{
	int count;

	/*
	 * glue commands together until next
	 * WAIT_FOR_ME
	 */
	count = a3xx_wait_reg(cmds, A3XX_CP_WFI_PEND_CTR,
			1, 0xFFFFFFFF, 0xF);

	/* MMU-500 VBIF stall */
	cmds[count++] = cp_type3_packet(CP_REG_RMW, 3);
	cmds[count++] = A3XX_VBIF_DDR_OUTPUT_RECOVERABLE_HALT_CTRL0;
	/* AND to unmask the HALT bit */
	cmds[count++] = ~(VBIF_RECOVERABLE_HALT_CTRL);
	/* OR to set the HALT bit */
	cmds[count++] = 0x1;

	/* Wait for acknowledgment */
	count += a3xx_wait_reg(&cmds[count],
			A3XX_VBIF_DDR_OUTPUT_RECOVERABLE_HALT_CTRL1,
			1, 0xFFFFFFFF, 0xF);

	return count;
}
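
/*
 * Build the matching unstall sequence for a3xx_vbif_lock(): clear the
 * recoverable halt bit and issue a CP_WAIT_FOR_ME so that the commands
 * queued since the lock are released. Returns the number of dwords
 * written.
 */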
static int a3xx_vbif_unlock(unsigned int *cmds)
{
	/* MMU-500 VBIF unstall */
	cmds[0] = cp_type3_packet(CP_REG_RMW, 3);
	cmds[1] = A3XX_VBIF_DDR_OUTPUT_RECOVERABLE_HALT_CTRL0;
	/* AND to unmask the HALT bit */
	cmds[2] = ~(VBIF_RECOVERABLE_HALT_CTRL);
	/* OR to reset the HALT bit */
	cmds[3] = 0;

	/* release all commands since _vbif_lock() with wait_for_me */
	cmds[4] = cp_type3_packet(CP_WAIT_FOR_ME, 1);
	cmds[5] = 0;

	return 6;
}
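
/*
 * Offset at which the IOMMU context registers are visible to the GPU,
 * and a helper that emits a CP_REG_WR_NO_CTXT header targeting
 * (A3XX_GPU_OFFSET + reg). The register offset is programmed in dword
 * units (hence the >> 2); the caller appends @num data dwords after the
 * two header dwords returned here.
 */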
#define A3XX_GPU_OFFSET 0xa000

static int a3xx_cp_smmu_reg(unsigned int *cmds,
		u32 reg,
		unsigned int num)
{
	cmds[0] = cp_type3_packet(CP_REG_WR_NO_CTXT, num + 1);
	cmds[1] = (A3XX_GPU_OFFSET + reg) >> 2;

	return 2;
}

/* This function is only needed for A3xx targets */
static int a3xx_tlbiall(unsigned int *cmds)
{
	unsigned int tlbstatus = (A3XX_GPU_OFFSET +
		KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
	int count;

	count = a3xx_cp_smmu_reg(cmds, KGSL_IOMMU_CTX_TLBIALL, 1);
	cmds[count++] = 1;

	count += a3xx_cp_smmu_reg(&cmds[count], KGSL_IOMMU_CTX_TLBSYNC, 1);
	cmds[count++] = 0;

	count += a3xx_wait_reg(&cmds[count], tlbstatus, 0,
		KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);

	return count;
}
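
/*
 * Build the ringbuffer commands for switching pagetables from the CP:
 * drain the pipeline with WAIT_FOR_ME plus a nop indirect buffer from
 * iommu->setstate, stall the VBIF, program the new TTBR0, unstall,
 * invalidate the TLB, and finally invalidate the CP state.
 */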
/* offset at which a nop command is placed in setstate */
#define KGSL_IOMMU_SETSTATE_NOP_OFFSET 1024

static int a3xx_rb_pagetable_switch(struct adreno_device *adreno_dev,
		struct kgsl_pagetable *pagetable, u32 *cmds)
{
	u64 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pagetable);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
	int count = 0;

	/* Skip pagetable switch if current context is using default PT. */
	if (pagetable == device->mmu.defaultpagetable)
		return 0;

	/*
	 * Adding an indirect buffer ensures that the prefetch stalls until
	 * the commands in indirect buffer have completed. We need to stall
	 * prefetch with a nop indirect buffer when updating pagetables
	 * because it provides stabler synchronization.
	 */
	cmds[count++] = cp_type3_packet(CP_WAIT_FOR_ME, 1);
	cmds[count++] = 0;

	cmds[count++] = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
	cmds[count++] = lower_32_bits(iommu->setstate->gpuaddr);
	cmds[count++] = 2;

	cmds[count++] = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	cmds[count++] = 0;

	cmds[count++] = cp_type3_packet(CP_WAIT_FOR_ME, 1);
	cmds[count++] = 0;

	count += a3xx_vbif_lock(&cmds[count]);

	count += a3xx_cp_smmu_reg(&cmds[count], KGSL_IOMMU_CTX_TTBR0, 2);
	cmds[count++] = lower_32_bits(ttbr0);
	cmds[count++] = upper_32_bits(ttbr0);

	count += a3xx_vbif_unlock(&cmds[count]);

	count += a3xx_tlbiall(&cmds[count]);

	/* wait for me to finish the TLBI */
	cmds[count++] = cp_type3_packet(CP_WAIT_FOR_ME, 1);
	cmds[count++] = 0;
	cmds[count++] = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	cmds[count++] = 0;

	/* Invalidate the state */
	cmds[count++] = cp_type3_packet(CP_INVALIDATE_STATE, 1);
	cmds[count++] = 0x7ffff;

	return count;
}

#define RB_SOPTIMESTAMP(device, rb) \
	MEMSTORE_RB_GPU_ADDR(device, rb, soptimestamp)
#define CTXT_SOPTIMESTAMP(device, drawctxt) \
	MEMSTORE_ID_GPU_ADDR(device, (drawctxt)->base.id, soptimestamp)

#define RB_EOPTIMESTAMP(device, rb) \
	MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp)
#define CTXT_EOPTIMESTAMP(device, drawctxt) \
	MEMSTORE_ID_GPU_ADDR(device, (drawctxt)->base.id, eoptimestamp)
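
/* A3xx targets use a single ringbuffer, so only RB 0 is set up */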
int a3xx_ringbuffer_init(struct adreno_device *adreno_dev)
{
	adreno_dev->num_ringbuffers = 1;
	adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);

	return adreno_ringbuffer_setup(adreno_dev,
		&adreno_dev->ringbuffers[0], 0);
}
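
/*
 * Worst-case number of dwords that a3xx_ringbuffer_addcmds() adds around
 * the caller's payload; space is reserved up front and rb->_wptr is
 * trimmed back once the actual size is known.
 */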
#define A3XX_SUBMIT_MAX 55

static int a3xx_ringbuffer_addcmds(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, struct adreno_context *drawctxt,
		u32 flags, u32 *in, u32 dwords, u32 timestamp,
		struct adreno_submit_time *time)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	u32 size = A3XX_SUBMIT_MAX + dwords;
	u32 *cmds, index = 0;
	u64 profile_gpuaddr;
	u32 profile_dwords;

	if (adreno_drawctxt_detached(drawctxt))
		return -ENOENT;

	if (adreno_gpu_fault(adreno_dev) != 0)
		return -EPROTO;

	rb->timestamp++;

	if (drawctxt)
		drawctxt->internal_timestamp = rb->timestamp;

	cmds = adreno_ringbuffer_allocspace(rb, size);
	if (IS_ERR(cmds))
		return PTR_ERR(cmds);

	/* Identify the start of a command */
	cmds[index++] = cp_type3_packet(CP_NOP, 1);
	cmds[index++] = drawctxt ? CMD_IDENTIFIER : CMD_INTERNAL_IDENTIFIER;

	if (IS_PWRON_FIXUP(flags)) {
		cmds[index++] = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
		cmds[index++] = 0;

		cmds[index++] = cp_type3_packet(CP_NOP, 1);
		cmds[index++] = PWRON_FIXUP_IDENTIFIER;

		cmds[index++] = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
		cmds[index++] = lower_32_bits(adreno_dev->pwron_fixup->gpuaddr);
		cmds[index++] = adreno_dev->pwron_fixup_dwords;

		cmds[index++] = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
		cmds[index++] = 0;
	}

	profile_gpuaddr = adreno_profile_preib_processing(adreno_dev,
		drawctxt, &profile_dwords);

	if (profile_gpuaddr) {
		cmds[index++] = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
		cmds[index++] = lower_32_bits(profile_gpuaddr);
		cmds[index++] = profile_dwords;
	}

	if (drawctxt) {
		cmds[index++] = cp_type3_packet(CP_MEM_WRITE, 2);
		cmds[index++] = lower_32_bits(CTXT_SOPTIMESTAMP(device,
			drawctxt));
		cmds[index++] = timestamp;
	}

	cmds[index++] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[index++] = lower_32_bits(RB_SOPTIMESTAMP(device, rb));
	cmds[index++] = rb->timestamp;

	if (IS_NOTPROTECTED(flags)) {
		cmds[index++] = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
		cmds[index++] = 0;
	}

	memcpy(&cmds[index], in, dwords << 2);
	index += dwords;

	if (IS_NOTPROTECTED(flags)) {
		cmds[index++] = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
		cmds[index++] = 1;
	}

	/*
	 * Flush HLSQ lazy updates to make sure there are no resources pending
	 * for indirect loads after the timestamp
	 */
	cmds[index++] = cp_type3_packet(CP_EVENT_WRITE, 1);
	cmds[index++] = 0x07; /* HLSQ FLUSH */
	cmds[index++] = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	cmds[index++] = 0;

	profile_gpuaddr = adreno_profile_postib_processing(adreno_dev,
		drawctxt, &profile_dwords);

	if (profile_gpuaddr) {
		cmds[index++] = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
		cmds[index++] = lower_32_bits(profile_gpuaddr);
		cmds[index++] = profile_dwords;
	}

	/*
	 * If this is an internal command, just write the ringbuffer timestamp,
	 * otherwise write both the context and the ringbuffer timestamps
	 */
	if (!drawctxt) {
		cmds[index++] = cp_type3_packet(CP_EVENT_WRITE, 3);
		cmds[index++] = CACHE_FLUSH_TS | (1 << 31);
		cmds[index++] = lower_32_bits(RB_EOPTIMESTAMP(device, rb));
		cmds[index++] = rb->timestamp;
	} else {
		cmds[index++] = cp_type3_packet(CP_EVENT_WRITE, 3);
		cmds[index++] = CACHE_FLUSH_TS | (1 << 31);
		cmds[index++] = lower_32_bits(CTXT_EOPTIMESTAMP(device,
			drawctxt));
		cmds[index++] = timestamp;

		cmds[index++] = cp_type3_packet(CP_EVENT_WRITE, 3);
		cmds[index++] = CACHE_FLUSH_TS;
		cmds[index++] = lower_32_bits(RB_EOPTIMESTAMP(device, rb));
		cmds[index++] = rb->timestamp;
	}

	/* Trigger a context rollover */
	cmds[index++] = cp_type3_packet(CP_SET_CONSTANT, 2);
	cmds[index++] = (4 << 16) | (A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000);
	cmds[index++] = 0;

	if (IS_WFI(flags)) {
		cmds[index++] = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
		cmds[index++] = 0;
	}

	/* Adjust the write pointer for the number of dwords we actually wrote */
	rb->_wptr -= (size - index);

	kgsl_pwrscale_busy(device);
	kgsl_regwrite(device, A3XX_CP_RB_WPTR, rb->_wptr);
	rb->wptr = rb->_wptr;

	return 0;
}
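
/*
 * Build and submit the commands for switching the ringbuffer to a new
 * drawctxt: an optional pagetable switch, memstore writes recording the
 * new current_context (per-RB and global), and a UCHE cache invalidate,
 * submitted as an unprotected internal command.
 */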
static int a3xx_rb_context_switch(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt)
{
	struct kgsl_pagetable *pagetable =
		adreno_drawctxt_get_pagetable(drawctxt);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	int count = 0;
	u32 cmds[64];

	if (adreno_drawctxt_get_pagetable(rb->drawctxt_active) != pagetable)
		count += a3xx_rb_pagetable_switch(adreno_dev, pagetable, cmds);

	cmds[count++] = cp_type3_packet(CP_NOP, 1);
	cmds[count++] = CONTEXT_TO_MEM_IDENTIFIER;

	cmds[count++] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[count++] = lower_32_bits(MEMSTORE_RB_GPU_ADDR(device, rb,
		current_context));
	cmds[count++] = drawctxt->base.id;

	cmds[count++] = cp_type3_packet(CP_MEM_WRITE, 2);
	cmds[count++] = lower_32_bits(MEMSTORE_ID_GPU_ADDR(device,
		KGSL_MEMSTORE_GLOBAL, current_context));
	cmds[count++] = drawctxt->base.id;

	cmds[count++] = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
	cmds[count++] = 0;
	cmds[count++] = 0x90000000;

	return a3xx_ringbuffer_addcmds(adreno_dev, rb, NULL, F_NOTPROTECTED,
		cmds, count, 0, NULL);
}
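
/*
 * Switch the ringbuffer to @drawctxt if it is not already active. Takes a
 * reference on the new context and drops the old context's reference once
 * the current ringbuffer timestamp retires.
 */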
static int a3xx_drawctxt_switch(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (rb->drawctxt_active == drawctxt)
		return 0;

	if (kgsl_context_detached(&drawctxt->base))
		return -ENOENT;

	if (!_kgsl_context_get(&drawctxt->base))
		return -ENOENT;

	trace_adreno_drawctxt_switch(rb, drawctxt);

	a3xx_rb_context_switch(adreno_dev, rb, drawctxt);

	/* Release the current drawctxt as soon as the new one is switched */
	adreno_put_drawctxt_on_timestamp(device, rb->drawctxt_active,
		rb, rb->timestamp);

	rb->drawctxt_active = drawctxt;

	return 0;
}
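
/*
 * Fixed dwords per submission (the start/end IB identifier NOPs); each IB
 * may add up to four more (an optional skip NOP plus the 3-dword
 * CP_INDIRECT_BUFFER_PFE packet), which sizes the kmalloc() below.
 */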
#define A3XX_COMMAND_DWORDS 4

int a3xx_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
		struct kgsl_drawobj_cmd *cmdobj, u32 flags,
		struct adreno_submit_time *time)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
	struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
	struct adreno_ringbuffer *rb = drawctxt->rb;
	int ret = 0, numibs = 0, index = 0;
	u32 *cmds;

	/* Count the number of IBs (if we are not skipping) */
	if (!IS_SKIP(flags)) {
		struct list_head *tmp;

		list_for_each(tmp, &cmdobj->cmdlist)
			numibs++;
	}

	cmds = kmalloc((A3XX_COMMAND_DWORDS + (numibs * 4)) << 2, GFP_KERNEL);
	if (!cmds) {
		ret = -ENOMEM;
		goto done;
	}

	cmds[index++] = cp_type3_packet(CP_NOP, 1);
	cmds[index++] = START_IB_IDENTIFIER;

	if (numibs) {
		struct kgsl_memobj_node *ib;

		list_for_each_entry(ib, &cmdobj->cmdlist, node) {
			if (ib->priv & MEMOBJ_SKIP ||
				(ib->flags & KGSL_CMDLIST_CTXTSWITCH_PREAMBLE
				 && !IS_PREAMBLE(flags)))
				cmds[index++] = cp_type3_packet(CP_NOP, 3);

			cmds[index++] =
				cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
			cmds[index++] = lower_32_bits(ib->gpuaddr);
			cmds[index++] = ib->size >> 2;
		}
	}

	cmds[index++] = cp_type3_packet(CP_NOP, 1);
	cmds[index++] = END_IB_IDENTIFIER;

	ret = a3xx_drawctxt_switch(adreno_dev, rb, drawctxt);

	/*
	 * In the unlikely event of an error in the drawctxt switch,
	 * treat it like a hang
	 */
	if (ret) {
		/*
		 * It is "normal" to get a -ENOSPC or a -ENOENT. Don't log it,
		 * the upper layers know how to handle it
		 */
		if (ret != -ENOSPC && ret != -ENOENT)
			dev_err(device->dev,
				"Unable to switch draw context: %d\n",
				ret);
		goto done;
	}

	adreno_drawobj_set_constraint(device, drawobj);

	ret = a3xx_ringbuffer_addcmds(adreno_dev, drawctxt->rb, drawctxt,
		flags, cmds, index, drawobj->timestamp, NULL);

done:
	trace_kgsl_issueibcmds(device, drawctxt->base.id, numibs,
		drawobj->timestamp, drawobj->flags, ret, drawctxt->type);

	kfree(cmds);
	return ret;
}