etnaviv_buffer.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_blt.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}
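
/*
 * Illustrative example (the concrete values assume the stock rnndb
 * encoding, where the header OFFSET is the register address in 32-bit
 * units): CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT, 1), with
 * VIVS_GL_PIPE_SELECT at 0x3800, emits the two 32-bit words
 *      0x08010e00      OP_LOAD_STATE | COUNT(1) | OFFSET(0x3800 >> 2)
 *      0x00000001      register value
 */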

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* the literal 200 is the wait delay, in clock cycles */
        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* 'prefetch' is the number of 64-bit words to fetch at 'address' */
        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
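
/*
 * In short: flush the caches of the pipe being left, then raise a
 * semaphore and stall the front end until the pixel engine has drained,
 * and only then program the pipe select, so the switch never lands while
 * the previous pipe still has work in flight.
 */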

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf,
                        &gpu->mmu_context->cmdbuf_mapping) +
                        off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        /* make sure the argument is visible before the new command */
        mb();
        lw[0] = cmd;
        mb();
}
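
/*
 * Concretely: the ring always ends in a WAIT followed by a LINK back to
 * that WAIT.  lw[0] is the WAIT opcode and lw[1] the dword after it,
 * which the WAIT ignores, so storing the new argument first and the
 * opcode last means the front end either still sees the old WAIT or the
 * complete new command, never a fresh opcode with a stale argument.
 */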

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer,
                                     &gpu->mmu_context->cmdbuf_mapping) +
               buffer->user_size;
}
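
/*
 * Worked example (hypothetical numbers): with a 4096-byte ring and
 * user_size == 4088, reserving 2 dwords needs 16 bytes and would overrun
 * the buffer, so user_size wraps to 0 and the returned GPU address is
 * the start of the ring mapping.
 */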

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        return buffer->user_size / 8;
}
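
/*
 * The ring now holds a self-referencing busy loop; with the buffer
 * mapped at GPU address A (illustrative):
 *      A + 0x0: WAIT
 *      A + 0x8: LINK prefetch=2, target=A
 * The front end spins here until the WAIT is later patched into a LINK
 * by etnaviv_buffer_queue() or etnaviv_sync_point_queue().
 */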

u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                               mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;
        bool has_blt = !!(gpu->identity.minor_features5 &
                          chipMinorFeatures5_BLT_ENGINE);

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                if (has_blt)
                        dwords += 10;
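
                /*
                 * Accounting, in the 64-bit units consumed by
                 * etnaviv_buffer_reserve(): 2 sem/stall + 1 cache flush +
                 * 1 TS flush + 2 sem/stall + 1 END = 7.  With BLT, each
                 * sem/stall pair gains 4 dwords of BLT enable/sem/stall/
                 * disable and the TS flush is replaced by 3 BLT dwords,
                 * i.e. 10 more.
                 */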

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                }
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D) {
                        if (has_blt) {
                                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                                CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
                                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                               VIVS_TS_FLUSH_CACHE_FLUSH);
                        }
                }
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                }
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

        /*
         * We need at most 4 dwords in the return target:
         * 1 event + 1 end + 1 wait + 1 link.
         */
        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}
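
/*
 * After this returns, the ring tail is (illustrative layout):
 *      EVENT id FROM_PE        signals the sync point to the kernel
 *      END                     halts the front end
 *      WAIT / LINK             fresh busy loop for when the kernel
 *                              restarts the FE after servicing the event
 */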

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        struct etnaviv_iommu_context *mmu_context, unsigned int event,
        struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;
        bool switch_mmu_context = gpu->mmu_context != mmu_context;
        unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
        bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
        bool has_blt = !!(gpu->identity.minor_features5 &
                          chipMinorFeatures5_BLT_ENGINE);

        lockdep_assert_held(&gpu->lock);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                            &gpu->mmu_context->cmdbuf_mapping);
        link_dwords = cmdbuf->size / 8;

        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append a mmu flush load state, followed by a new
         * link to this buffer - four additional words in the simplest
         * case; the exact count is accumulated in extra_dwords below.
         */
        if (need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (need_flush) {
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                /* PTA load command */
                if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
                        extra_dwords += 1;
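
                /*
                 * Worst case (illustrative): 1 link + 3 MMUv2 flush +
                 * 4 pipe switch + 1 PTA load = 9 extra dwords.
                 */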

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

                /*
                 * Switch MMU context if necessary.  Must be done after the
                 * link target has been calculated, as the jump forward in the
                 * kernel ring still uses the last active MMU context before
                 * the switch.
                 */
                if (switch_mmu_context) {
                        struct etnaviv_iommu_context *old_context = gpu->mmu_context;

                        gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
                        etnaviv_iommu_context_put(old_context);
                }

                if (need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                            VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

                                if (switch_mmu_context &&
                                    gpu->sec_mode == ETNA_SEC_KERNEL) {
                                        unsigned short id =
                                                etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
                                        CMD_LOAD_STATE(buffer,
                                                       VIVS_MMUv2_PTA_CONFIG,
                                                       VIVS_MMUv2_PTA_CONFIG_INDEX(id));
                                }

                                if (gpu->sec_mode == ETNA_SEC_NONE)
                                        flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                               flush);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                          SYNC_RECIPIENT_PE);
                        }

                        gpu->flush_seq = new_flush_seq;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                link_target = etnaviv_cmdbuf_get_va(cmdbuf,
                                                    &gpu->mmu_context->cmdbuf_mapping);
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;

        /*
         * When the BLT engine is present we need 6 more dwords in the return
         * target: 3 enable/flush/disable + 4 enable/semaphore stall/disable,
         * but we don't need the normal TS flush state.
         */
        if (has_blt)
                return_dwords += 6;

        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_DEPTH |
                               VIVS_GL_FLUSH_CACHE_COLOR);
                if (has_blt) {
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
                        CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
                } else {
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                }
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        if (has_blt) {
                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
                CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
        }

        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2,
                 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
                 + buffer->user_size - 4);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target,
                        etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
                        cmdbuf->vaddr);

        if (drm_debug_enabled(DRM_UT_DRIVER)) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug_enabled(DRM_UT_DRIVER))
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}