adreno_a5xx_ringbuffer.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_a5xx.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_trace.h"
#include "kgsl_trace.h"

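/*
 * Emit the PM4 packets for a pagetable switch from the ringbuffer. The CP
 * performs the SMMU TTBR0 update itself, and the new ttbr0/context id are
 * also mirrored into the per-ringbuffer scratch memory so the kernel can
 * tell which pagetable the ringbuffer is using. Returns the number of dwords
 * written to cmds (0 when switching to the default pagetable).
 */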
static int a5xx_rb_pagetable_switch(struct kgsl_device *device,
                struct adreno_context *drawctxt,
                struct adreno_ringbuffer *rb,
                struct kgsl_pagetable *pagetable, u32 *cmds)
{
        u64 ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pagetable);
        u32 id = drawctxt ? drawctxt->base.id : 0;

        if (pagetable == device->mmu.defaultpagetable)
                return 0;

        cmds[0] = cp_type7_packet(CP_SMMU_TABLE_UPDATE, 3);
        cmds[1] = lower_32_bits(ttbr0);
        cmds[2] = upper_32_bits(ttbr0);
        cmds[3] = id;

        cmds[4] = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
        cmds[5] = cp_type7_packet(CP_WAIT_FOR_ME, 0);
        cmds[6] = cp_type4_packet(A5XX_CP_CNTL, 1);
        cmds[7] = 1;

        cmds[8] = cp_type7_packet(CP_MEM_WRITE, 5);
        cmds[9] = lower_32_bits(SCRATCH_RB_GPU_ADDR(device, rb->id, ttbr0));
        cmds[10] = upper_32_bits(SCRATCH_RB_GPU_ADDR(device, rb->id, ttbr0));
        cmds[11] = lower_32_bits(ttbr0);
        cmds[12] = upper_32_bits(ttbr0);
        cmds[13] = id;

        cmds[14] = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
        cmds[15] = cp_type7_packet(CP_WAIT_FOR_ME, 0);
        cmds[16] = cp_type4_packet(A5XX_CP_CNTL, 1);
        cmds[17] = 0;

        return 18;
}

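/* Memstore GPU addresses for the start/end-of-pipeline timestamps */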
#define RB_SOPTIMESTAMP(device, rb) \
        MEMSTORE_RB_GPU_ADDR(device, rb, soptimestamp)
#define CTXT_SOPTIMESTAMP(device, drawctxt) \
        MEMSTORE_ID_GPU_ADDR(device, (drawctxt)->base.id, soptimestamp)

#define RB_EOPTIMESTAMP(device, rb) \
        MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp)
#define CTXT_EOPTIMESTAMP(device, drawctxt) \
        MEMSTORE_ID_GPU_ADDR(device, (drawctxt)->base.id, eoptimestamp)

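/*
 * Kick off execution by copying the software wptr to the hardware register.
 * When sync is true a CP_WHERE_AM_I packet is queued first so the CP
 * refreshes the rptr shadow in scratch memory. The register is only written
 * when no preemption is in flight and this ringbuffer is the current one;
 * otherwise only the software copy of the wptr is updated.
 */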
int a5xx_ringbuffer_submit(struct adreno_ringbuffer *rb,
                struct adreno_submit_time *time, bool sync)
{
        struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        unsigned long flags;

        adreno_get_submit_time(adreno_dev, rb, time);
        adreno_profile_submit_time(time);

        if (sync) {
                u32 *cmds = adreno_ringbuffer_allocspace(rb, 3);

                if (IS_ERR(cmds))
                        return PTR_ERR(cmds);

                cmds[0] = cp_type7_packet(CP_WHERE_AM_I, 2);
                cmds[1] = lower_32_bits(SCRATCH_RB_GPU_ADDR(device, rb->id,
                        rptr));
                cmds[2] = upper_32_bits(SCRATCH_RB_GPU_ADDR(device, rb->id,
                        rptr));
        }

        spin_lock_irqsave(&rb->preempt_lock, flags);

        if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
                if (adreno_dev->cur_rb == rb) {
                        kgsl_pwrscale_busy(device);
                        kgsl_regwrite(device, A5XX_CP_RB_WPTR, rb->_wptr);
                }
        }

        rb->wptr = rb->_wptr;

        spin_unlock_irqrestore(&rb->preempt_lock, flags);

        return 0;
}

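/*
 * One-time ringbuffer setup: allocate the privileged scratch memory if it is
 * not already there, then set up either a single ringbuffer (preemption
 * feature not set) or the whole ringbuffer array along with the preemption
 * timer and the A5XX preemption state.
 */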
int a5xx_ringbuffer_init(struct adreno_device *adreno_dev)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        int i;

        if (IS_ERR_OR_NULL(device->scratch))
                device->scratch = kgsl_allocate_global(device, PAGE_SIZE,
                        0, 0, KGSL_MEMDESC_RANDOM | KGSL_MEMDESC_PRIVILEGED,
                        "scratch");

        if (IS_ERR(device->scratch))
                return PTR_ERR(device->scratch);

        adreno_dev->cur_rb = &(adreno_dev->ringbuffers[0]);

        if (!adreno_preemption_feature_set(adreno_dev)) {
                adreno_dev->num_ringbuffers = 1;
                return adreno_ringbuffer_setup(adreno_dev,
                        &adreno_dev->ringbuffers[0], 0);
        }

        adreno_dev->num_ringbuffers = ARRAY_SIZE(adreno_dev->ringbuffers);

        for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
                int ret;

                ret = adreno_ringbuffer_setup(adreno_dev,
                        &adreno_dev->ringbuffers[i], i);
                if (ret)
                        return ret;
        }

        timer_setup(&adreno_dev->preempt.timer, adreno_preemption_timer, 0);
        a5xx_preemption_init(adreno_dev);

        return 0;
}

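/*
 * A5XX_SUBMIT_MAX is the worst-case number of dwords the kernel wraps around
 * a submission; the ringbuffer allocation is trimmed back once the actual
 * size is known. a5xx_ringbuffer_addcmds() below surrounds the incoming
 * commands with the preemption preamble/postamble, profiling IBs, secure and
 * protected mode toggles and the SOP/EOP timestamp writes, then submits the
 * ringbuffer.
 */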
#define A5XX_SUBMIT_MAX 64

int a5xx_ringbuffer_addcmds(struct adreno_device *adreno_dev,
                struct adreno_ringbuffer *rb, struct adreno_context *drawctxt,
                u32 flags, u32 *in, u32 dwords, u32 timestamp,
                struct adreno_submit_time *time)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        static u32 sequence;
        u32 size = A5XX_SUBMIT_MAX + dwords;
        u32 *cmds, index = 0;
        u64 profile_gpuaddr;
        u32 profile_dwords;

        if (adreno_drawctxt_detached(drawctxt))
                return -ENOENT;

        if (adreno_gpu_fault(adreno_dev) != 0)
                return -EPROTO;

        rb->timestamp++;

        if (drawctxt)
                drawctxt->internal_timestamp = rb->timestamp;

        cmds = adreno_ringbuffer_allocspace(rb, size);
        if (IS_ERR(cmds))
                return PTR_ERR(cmds);

        /* Identify the start of a command */
        cmds[index++] = cp_type7_packet(CP_NOP, 1);
        cmds[index++] = drawctxt ? CMD_IDENTIFIER : CMD_INTERNAL_IDENTIFIER;

        /* 14 dwords */
        index += a5xx_preemption_pre_ibsubmit(adreno_dev, rb, drawctxt,
                &cmds[index]);

        profile_gpuaddr = adreno_profile_preib_processing(adreno_dev,
                drawctxt, &profile_dwords);

        if (profile_gpuaddr) {
                cmds[index++] = cp_type7_packet(CP_INDIRECT_BUFFER_PFE, 3);
                cmds[index++] = lower_32_bits(profile_gpuaddr);
                cmds[index++] = upper_32_bits(profile_gpuaddr);
                cmds[index++] = profile_dwords;
        }

        if (drawctxt) {
                cmds[index++] = cp_type7_packet(CP_MEM_WRITE, 3);
                cmds[index++] = lower_32_bits(CTXT_SOPTIMESTAMP(device,
                        drawctxt));
                cmds[index++] = upper_32_bits(CTXT_SOPTIMESTAMP(device,
                        drawctxt));
                cmds[index++] = timestamp;
        }

        cmds[index++] = cp_type7_packet(CP_MEM_WRITE, 3);
        cmds[index++] = lower_32_bits(RB_SOPTIMESTAMP(device, rb));
        cmds[index++] = upper_32_bits(RB_SOPTIMESTAMP(device, rb));
        cmds[index++] = rb->timestamp;

        if (IS_SECURE(flags)) {
                cmds[index++] = cp_type7_packet(CP_SET_SECURE_MODE, 1);
                cmds[index++] = 1;
        }

        if (IS_NOTPROTECTED(flags)) {
                cmds[index++] = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
                cmds[index++] = 0;
        }

        memcpy(&cmds[index], in, dwords << 2);
        index += dwords;

        if (IS_NOTPROTECTED(flags)) {
                cmds[index++] = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
                cmds[index++] = 1;
        }

        /* 4 dwords */
        profile_gpuaddr = adreno_profile_postib_processing(adreno_dev,
                drawctxt, &profile_dwords);

        if (profile_gpuaddr) {
                cmds[index++] = cp_type7_packet(CP_INDIRECT_BUFFER_PFE, 3);
                cmds[index++] = lower_32_bits(profile_gpuaddr);
                cmds[index++] = upper_32_bits(profile_gpuaddr);
                cmds[index++] = profile_dwords;
        }

        if (!adreno_is_a510(adreno_dev) &&
                test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
                        &device->mmu.pfpolicy))
                cmds[index++] = cp_type7_packet(CP_WAIT_MEM_WRITES, 0);

        /*
         * Do a unique memory write from the GPU to assist in early detection
         * of interrupt storms
         */
        cmds[index++] = cp_type7_packet(CP_MEM_WRITE, 3);
        cmds[index++] = lower_32_bits(MEMSTORE_ID_GPU_ADDR(device,
                KGSL_MEMSTORE_GLOBAL, ref_wait_ts));
        cmds[index++] = upper_32_bits(MEMSTORE_ID_GPU_ADDR(device,
                KGSL_MEMSTORE_GLOBAL, ref_wait_ts));
        cmds[index++] = ++sequence;

        /*
         * If this is an internal command, just write the ringbuffer timestamp,
         * otherwise write both
         */
        if (!drawctxt) {
                cmds[index++] = cp_type7_packet(CP_EVENT_WRITE, 4);
                cmds[index++] = CACHE_FLUSH_TS | (1 << 31);
                cmds[index++] = lower_32_bits(RB_EOPTIMESTAMP(device, rb));
                cmds[index++] = upper_32_bits(RB_EOPTIMESTAMP(device, rb));
                cmds[index++] = rb->timestamp;
        } else {
                cmds[index++] = cp_type7_packet(CP_EVENT_WRITE, 4);
                cmds[index++] = CACHE_FLUSH_TS | (1 << 31);
                cmds[index++] = lower_32_bits(CTXT_EOPTIMESTAMP(device,
                        drawctxt));
                cmds[index++] = upper_32_bits(CTXT_EOPTIMESTAMP(device,
                        drawctxt));
                cmds[index++] = timestamp;

                cmds[index++] = cp_type7_packet(CP_EVENT_WRITE, 4);
                cmds[index++] = CACHE_FLUSH_TS;
                cmds[index++] = lower_32_bits(RB_EOPTIMESTAMP(device, rb));
                cmds[index++] = upper_32_bits(RB_EOPTIMESTAMP(device, rb));
                cmds[index++] = rb->timestamp;
        }

        if (IS_WFI(flags))
                cmds[index++] = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);

        if (IS_SECURE(flags)) {
                cmds[index++] = cp_type7_packet(CP_SET_SECURE_MODE, 1);
                cmds[index++] = 0;
        }

        /* 5 dwords */
        index += a5xx_preemption_post_ibsubmit(adreno_dev, &cmds[index]);

        /* Adjust the wptr for the number of dwords we actually wrote */
        rb->_wptr -= (size - index);

        a5xx_ringbuffer_submit(rb, time,
                !adreno_is_preemption_enabled(adreno_dev));

        return 0;
}

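/*
 * Emit a CP_REG_TO_MEM that copies the RBBM always-on counter into gpuaddr.
 * On targets newer than the A530 the copy is done as a 64 bit, two register
 * read; on older targets only the lower 32 bits are captured because the
 * upper half is not reliable. Returns the number of dwords written.
 */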
static u32 a5xx_get_alwayson_counter(struct adreno_device *adreno_dev,
                u32 *cmds, u64 gpuaddr)
{
        cmds[0] = cp_type7_packet(CP_REG_TO_MEM, 3);
        cmds[1] = A5XX_RBBM_ALWAYSON_COUNTER_LO;

        /* On some targets the upper 32 bits are not reliable */
        if (ADRENO_GPUREV(adreno_dev) > ADRENO_REV_A530)
                cmds[1] |= (1 << 30) | (2 << 18);

        cmds[2] = lower_32_bits(gpuaddr);
        cmds[3] = upper_32_bits(gpuaddr);

        return 4;
}

/* This is the maximum possible size for 64 bit targets */
#define PROFILE_IB_DWORDS 4
#define PROFILE_IB_SLOTS (PAGE_SIZE / (PROFILE_IB_DWORDS << 2))

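/*
 * Build a small IB in the per-ringbuffer profiling buffer that captures the
 * always-on counter into the user's profiling buffer at target_offset, and
 * emit a CP_INDIRECT_BUFFER_PFE in cmds pointing at it. Returns the number
 * of dwords written to cmds (0 if the profiling buffer is not available).
 */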
static u64 a5xx_get_user_profiling_ib(struct adreno_device *adreno_dev,
                struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj,
                u32 target_offset, u32 *cmds)
{
        u32 offset, *ib, dwords;
        u64 gpuaddr;

        if (IS_ERR(rb->profile_desc))
                return 0;

        offset = rb->profile_index * (PROFILE_IB_DWORDS << 2);
        ib = rb->profile_desc->hostptr + offset;
        gpuaddr = rb->profile_desc->gpuaddr + offset;

        dwords = a5xx_get_alwayson_counter(adreno_dev, ib,
                cmdobj->profiling_buffer_gpuaddr + target_offset);

        cmds[0] = cp_type7_packet(CP_INDIRECT_BUFFER_PFE, 3);
        cmds[1] = lower_32_bits(gpuaddr);
        cmds[2] = upper_32_bits(gpuaddr);
        cmds[3] = dwords;

        rb->profile_index = (rb->profile_index + 1) % PROFILE_IB_SLOTS;

        return 4;
}

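/*
 * Emit the ringbuffer commands for switching to a new drawctxt: a pagetable
 * switch when the pagetable changes, memstore writes recording the new
 * current context (both per-ringbuffer and global), and a UCHE invalidate.
 * The commands are queued as an internal, unprotected submission.
 */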
static int a5xx_rb_context_switch(struct adreno_device *adreno_dev,
                struct adreno_ringbuffer *rb,
                struct adreno_context *drawctxt)
{
        struct kgsl_pagetable *pagetable =
                adreno_drawctxt_get_pagetable(drawctxt);
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        int count = 0;
        u32 cmds[32];

        if (adreno_drawctxt_get_pagetable(rb->drawctxt_active) != pagetable)
                count += a5xx_rb_pagetable_switch(device, drawctxt,
                        rb, pagetable, cmds);

        cmds[count++] = cp_type7_packet(CP_NOP, 1);
        cmds[count++] = CONTEXT_TO_MEM_IDENTIFIER;

        cmds[count++] = cp_type7_packet(CP_MEM_WRITE, 3);
        cmds[count++] = lower_32_bits(MEMSTORE_RB_GPU_ADDR(device, rb,
                current_context));
        cmds[count++] = upper_32_bits(MEMSTORE_RB_GPU_ADDR(device, rb,
                current_context));
        cmds[count++] = drawctxt->base.id;

        cmds[count++] = cp_type7_packet(CP_MEM_WRITE, 3);
        cmds[count++] = lower_32_bits(MEMSTORE_ID_GPU_ADDR(device,
                KGSL_MEMSTORE_GLOBAL, current_context));
        cmds[count++] = upper_32_bits(MEMSTORE_ID_GPU_ADDR(device,
                KGSL_MEMSTORE_GLOBAL, current_context));
        cmds[count++] = drawctxt->base.id;

        cmds[count++] = cp_type4_packet(A5XX_UCHE_INVALIDATE0, 1);
        cmds[count++] = 0x12;

        return a5xx_ringbuffer_addcmds(adreno_dev, rb, NULL, F_NOTPROTECTED,
                cmds, count, 0, NULL);
}

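/*
 * Switch the ringbuffer to a new drawctxt. A reference is taken on the
 * incoming context, the context switch commands are queued, and the
 * outgoing context is released once the ringbuffer timestamp for the
 * switch retires.
 */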
static int a5xx_drawctxt_switch(struct adreno_device *adreno_dev,
                struct adreno_ringbuffer *rb,
                struct adreno_context *drawctxt)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

        if (rb->drawctxt_active == drawctxt)
                return 0;

        if (kgsl_context_detached(&drawctxt->base))
                return -ENOENT;

        if (!_kgsl_context_get(&drawctxt->base))
                return -ENOENT;

        trace_adreno_drawctxt_switch(rb, drawctxt);

        a5xx_rb_context_switch(adreno_dev, rb, drawctxt);

        /* Release the current drawctxt as soon as the new one is switched */
        adreno_put_drawctxt_on_timestamp(device, rb->drawctxt_active,
                rb, rb->timestamp);

        rb->drawctxt_active = drawctxt;

        return 0;
}

#define A5XX_USER_PROFILE_IB(dev, rb, cmdobj, cmds, field) \
        a5xx_get_user_profiling_ib((dev), (rb), (cmdobj), \
                offsetof(struct kgsl_drawobj_profiling_buffer, field), \
                (cmds))

#define A5XX_KERNEL_PROFILE(dev, cmdobj, cmds, field) \
        a5xx_get_alwayson_counter((dev), (cmds), \
                (dev)->profile_buffer->gpuaddr + \
                ADRENO_DRAWOBJ_PROFILE_OFFSET((cmdobj)->profile_index, \
                        field))

#define A5XX_COMMAND_DWORDS 32

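/*
 * Build the command stream for a user command object: start/end markers,
 * optional kernel and user profiling, one CP_INDIRECT_BUFFER_PFE per IB
 * (NOP'ed out when the IB is skipped), and the SET_RENDER_MODE /
 * YIELD_ENABLE preemption hints. The result is handed to
 * a5xx_ringbuffer_addcmds() on the context's ringbuffer after the drawctxt
 * switch.
 */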
int a5xx_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
                struct kgsl_drawobj_cmd *cmdobj, u32 flags,
                struct adreno_submit_time *time)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
        struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
        struct adreno_ringbuffer *rb = drawctxt->rb;
        int ret = 0, numibs = 0, index = 0;
        u32 *cmds;

        /* Count the number of IBs (if we are not skipping) */
        if (!IS_SKIP(flags)) {
                struct list_head *tmp;

                list_for_each(tmp, &cmdobj->cmdlist)
                        numibs++;
        }

        cmds = kmalloc((A5XX_COMMAND_DWORDS + (numibs * 5)) << 2, GFP_KERNEL);
        if (!cmds) {
                ret = -ENOMEM;
                goto done;
        }

        cmds[index++] = cp_type7_packet(CP_NOP, 1);
        cmds[index++] = START_IB_IDENTIFIER;

        /* Kernel profiling: 4 dwords */
        if (IS_KERNEL_PROFILE(flags))
                index += A5XX_KERNEL_PROFILE(adreno_dev, cmdobj, &cmds[index],
                        started);

        /* User profiling: 4 dwords */
        if (IS_USER_PROFILE(flags))
                index += A5XX_USER_PROFILE_IB(adreno_dev, rb, cmdobj,
                        &cmds[index], gpu_ticks_submitted);

        if (numibs) {
                struct kgsl_memobj_node *ib;

                list_for_each_entry(ib, &cmdobj->cmdlist, node) {
                        if (ib->priv & MEMOBJ_SKIP ||
                                (ib->flags & KGSL_CMDLIST_CTXTSWITCH_PREAMBLE
                                 && !IS_PREAMBLE(flags)))
                                cmds[index++] = cp_type7_packet(CP_NOP, 4);

                        cmds[index++] =
                                cp_type7_packet(CP_INDIRECT_BUFFER_PFE, 3);
                        cmds[index++] = lower_32_bits(ib->gpuaddr);
                        cmds[index++] = upper_32_bits(ib->gpuaddr);
                        /* Double check that IB_PRIV is never set */
                        cmds[index++] = (ib->size >> 2) & 0xfffff;
                }
        }

        /*
         * SRM -- set render mode (e.g. binning, direct render etc).
         * SRM is set by the UMD, usually at the start of an IB, to tell the
         * CP the type of preemption. The KMD sets SRM to NULL here to tell
         * the CP that rendering is done by the IB.
         */
        cmds[index++] = cp_type7_packet(CP_SET_RENDER_MODE, 5);
        cmds[index++] = 0;
        cmds[index++] = 0;
        cmds[index++] = 0;
        cmds[index++] = 0;
        cmds[index++] = 0;

        cmds[index++] = cp_type7_packet(CP_YIELD_ENABLE, 1);
        cmds[index++] = 1;

        /* 4 dwords */
        if (IS_KERNEL_PROFILE(flags))
                index += A5XX_KERNEL_PROFILE(adreno_dev, cmdobj, &cmds[index],
                        retired);

        /* 4 dwords */
        if (IS_USER_PROFILE(flags))
                index += A5XX_USER_PROFILE_IB(adreno_dev, rb, cmdobj,
                        &cmds[index], gpu_ticks_retired);

        cmds[index++] = cp_type7_packet(CP_NOP, 1);
        cmds[index++] = END_IB_IDENTIFIER;

        ret = a5xx_drawctxt_switch(adreno_dev, rb, drawctxt);

        /*
         * In the unlikely event of an error in the drawctxt switch,
         * treat it like a hang
         */
        if (ret) {
                /*
                 * It is "normal" to get a -ENOSPC or a -ENOENT. Don't log it,
                 * the upper layers know how to handle it
                 */
                if (ret != -ENOSPC && ret != -ENOENT)
                        dev_err(device->dev,
                                "Unable to switch draw context: %d\n", ret);
                goto done;
        }

        adreno_drawobj_set_constraint(device, drawobj);

        ret = a5xx_ringbuffer_addcmds(adreno_dev, drawctxt->rb, drawctxt,
                flags, cmds, index, drawobj->timestamp, time);

done:
        trace_kgsl_issueibcmds(device, drawctxt->base.id, numibs,
                drawobj->timestamp, drawobj->flags, ret, drawctxt->type);

        kfree(cmds);

        return ret;
}