adreno_a5xx_preempt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2017,2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "adreno.h"
#include "adreno_a5xx.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"

#define PREEMPT_RECORD(_field) \
		offsetof(struct a5xx_cp_preemption_record, _field)

#define PREEMPT_SMMU_RECORD(_field) \
		offsetof(struct a5xx_cp_smmu_info, _field)
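
/*
 * Bring the hardware CP_RB_WPTR in sync with the software wptr of the
 * current ringbuffer and refresh the dispatch queue timeout if anything
 * changed (or if the caller asked for it).
 */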
static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
	unsigned int wptr;
	unsigned long flags;

	spin_lock_irqsave(&rb->preempt_lock, flags);

	kgsl_regread(device, A5XX_CP_RB_WPTR, &wptr);

	if (wptr != rb->wptr) {
		kgsl_regwrite(device, A5XX_CP_RB_WPTR, rb->wptr);
		/*
		 * In case something got submitted while preemption was
		 * ongoing, reset the timer.
		 */
		reset_timer = true;
	}

	if (reset_timer)
		rb->dispatch_q.expires = jiffies +
			msecs_to_jiffies(adreno_drawobj_timeout);

	spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
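
/*
 * Called once the preempt state machine reaches ADRENO_PREEMPT_COMPLETE:
 * verify that the CP has cleared CONTEXT_SWITCH_CNTL, then promote next_rb
 * to cur_rb and return the state machine to ADRENO_PREEMPT_NONE.
 */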
static void _a5xx_preemption_done(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * In the very unlikely case that the power is off, do nothing - the
	 * state will be reset on power up and everybody will be happy
	 */
	if (!kgsl_state_is_awake(device))
		return;

	kgsl_regread(device, A5XX_CP_CONTEXT_SWITCH_CNTL, &status);

	if (status != 0) {
		dev_err(device->dev,
			"Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
			status, adreno_dev->cur_rb->id,
			adreno_get_rptr(adreno_dev->cur_rb),
			adreno_dev->cur_rb->wptr,
			adreno_dev->next_rb->id,
			adreno_get_rptr(adreno_dev->next_rb),
			adreno_dev->next_rb->wptr);

		/* Set a fault and restart */
		adreno_dispatcher_fault(adreno_dev, ADRENO_PREEMPT_FAULT);

		return;
	}

	del_timer_sync(&adreno_dev->preempt.timer);

	trace_adreno_preempt_done(adreno_dev->cur_rb->id, adreno_dev->next_rb->id,
		0, 0);

	/* Clean up all the bits */
	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr for the new command queue */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	/* Clear the preempt state */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
}
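
/*
 * Called from the preemption worker when the state machine reports a fault:
 * re-check the status register once, and declare a preemption fault if the
 * switch still has not completed.
 */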
static void _a5xx_preemption_fault(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	/*
	 * If the power is on check the preemption status one more time - if it
	 * was successful then just transition to the complete state
	 */
	if (kgsl_state_is_awake(device)) {
		kgsl_regread(device, A5XX_CP_CONTEXT_SWITCH_CNTL, &status);

		if (status == 0) {
			adreno_set_preempt_state(adreno_dev,
				ADRENO_PREEMPT_COMPLETE);

			adreno_dispatcher_schedule(device);
			return;
		}
	}

	dev_err(device->dev,
		"Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
		adreno_dev->cur_rb->id,
		adreno_get_rptr(adreno_dev->cur_rb),
		adreno_dev->cur_rb->wptr,
		adreno_dev->next_rb->id,
		adreno_get_rptr(adreno_dev->next_rb),
		adreno_dev->next_rb->wptr);

	adreno_dispatcher_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
}
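
/*
 * Work handler for the ADRENO_PREEMPT_FAULTED state; runs with the device
 * mutex held so the power stays on while the fault is handled.
 */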
static void _a5xx_preemption_worker(struct work_struct *work)
{
	struct adreno_preemption *preempt = container_of(work,
		struct adreno_preemption, work);
	struct adreno_device *adreno_dev = container_of(preempt,
		struct adreno_device, preempt);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	/* Need to take the mutex to make sure that the power stays on */
	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
		_a5xx_preemption_fault(adreno_dev);

	mutex_unlock(&device->mutex);
}

/* Find the highest priority active ringbuffer */
static struct adreno_ringbuffer *a5xx_next_ringbuffer(
		struct adreno_device *adreno_dev)
{
	struct adreno_ringbuffer *rb;
	unsigned long flags;
	unsigned int i;

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		bool empty;

		spin_lock_irqsave(&rb->preempt_lock, flags);
		empty = adreno_rb_empty(rb);
		spin_unlock_irqrestore(&rb->preempt_lock, flags);

		if (!empty)
			return rb;
	}

	return NULL;
}
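
/*
 * Kick off a preemption to the highest priority non-empty ringbuffer: point
 * the CP at that ringbuffer's context record (and at the SMMU info when
 * per-process pagetables are in use), arm the preemption watchdog timer and
 * write CONTEXT_SWITCH_CNTL to start the switch.
 */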
void a5xx_preemption_trigger(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
	struct adreno_ringbuffer *next;
	uint64_t ttbr0;
	unsigned int contextidr;
	unsigned long flags;

	/* Put ourselves into a possible trigger state */
	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
		return;

	/* Get the next ringbuffer to preempt in */
	next = a5xx_next_ringbuffer(adreno_dev);

	/*
	 * Nothing to do if every ringbuffer is empty or if the current
	 * ringbuffer is the only active one
	 */
	if (next == NULL || next == adreno_dev->cur_rb) {
		/*
		 * Update any critical things that might have been skipped
		 * while we were looking for a new ringbuffer
		 */
		if (next != NULL) {
			_update_wptr(adreno_dev, false);

			mod_timer(&adreno_dev->dispatcher.timer,
				adreno_dev->cur_rb->dispatch_q.expires);
		}

		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
		return;
	}

	/* Turn off the dispatcher timer */
	del_timer(&adreno_dev->dispatcher.timer);

	/*
	 * This is the most critical section - we need to take care not to race
	 * until we have programmed the CP for the switch
	 */
	spin_lock_irqsave(&next->preempt_lock, flags);

	/* Get the pagetable from the pagetable info. */
	kgsl_sharedmem_readq(device->scratch, &ttbr0,
		SCRATCH_RB_OFFSET(next->id, ttbr0));
	kgsl_sharedmem_readl(device->scratch, &contextidr,
		SCRATCH_RB_OFFSET(next->id, contextidr));

	kgsl_sharedmem_writel(next->preemption_desc,
		PREEMPT_RECORD(wptr), next->wptr);

	spin_unlock_irqrestore(&next->preempt_lock, flags);

	/* And write it to the smmu info */
	if (kgsl_mmu_is_perprocess(&device->mmu)) {
		kgsl_sharedmem_writeq(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
		kgsl_sharedmem_writel(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(context_idr), contextidr);
	}

	kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
		lower_32_bits(next->preemption_desc->gpuaddr));
	kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
		upper_32_bits(next->preemption_desc->gpuaddr));

	adreno_dev->next_rb = next;

	/* Start the timer to detect a stuck preemption */
	mod_timer(&adreno_dev->preempt.timer,
		jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));

	trace_adreno_preempt_trigger(adreno_dev->cur_rb->id, adreno_dev->next_rb->id,
		1, 0);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);

	/* Trigger the preemption */
	kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
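
/*
 * Interrupt callback for preemption completion: finish the pending switch,
 * promote next_rb to cur_rb and immediately check whether another preemption
 * should be triggered.
 */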
void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	unsigned int status;

	if (!adreno_move_preempt_state(adreno_dev,
		ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
		return;

	kgsl_regread(device, A5XX_CP_CONTEXT_SWITCH_CNTL, &status);

	if (status != 0) {
		dev_err(KGSL_DEVICE(adreno_dev)->dev,
			"preempt interrupt with non-zero status: %X\n",
			status);

		/*
		 * Under the assumption that this is a race between the
		 * interrupt and the register, schedule the worker to clean up.
		 * If the status still hasn't resolved itself by the time we
		 * get there then we have to assume something bad happened
		 */
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
		adreno_dispatcher_schedule(device);
		return;
	}

	del_timer(&adreno_dev->preempt.timer);

	trace_adreno_preempt_done(adreno_dev->cur_rb->id, adreno_dev->next_rb->id,
		0, 0);

	adreno_dev->prev_rb = adreno_dev->cur_rb;
	adreno_dev->cur_rb = adreno_dev->next_rb;
	adreno_dev->next_rb = NULL;

	/* Update the wptr if it changed while preemption was ongoing */
	_update_wptr(adreno_dev, true);

	/* Update the dispatcher timer for the new command queue */
	mod_timer(&adreno_dev->dispatcher.timer,
		adreno_dev->cur_rb->dispatch_q.expires);

	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	a5xx_preemption_trigger(adreno_dev);
}
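
/*
 * Complete any finished preemption and then attempt to trigger the next one,
 * holding the device mutex throughout.
 */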
void a5xx_preemption_schedule(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	mutex_lock(&device->mutex);

	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
		_a5xx_preemption_done(adreno_dev);

	a5xx_preemption_trigger(adreno_dev);

	mutex_unlock(&device->mutex);
}
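
/*
 * Emit the PM4 commands that precede an IB submission: select the preemption
 * style for the context, program the context switch save address and enable
 * yield points. Returns the number of dwords written to cmds.
 */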
u32 a5xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb,
		struct adreno_context *drawctxt, u32 *cmds)
{
	unsigned int *cmds_orig = cmds;
	uint64_t gpuaddr = rb->preemption_desc->gpuaddr;
	unsigned int preempt_style = 0;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	if (drawctxt) {
		/*
		 * Preemption from secure to non-secure needs the Zap shader to
		 * be run to clear all secure content. The CP does not know
		 * during preemption whether it is switching between secure and
		 * non-secure contexts, so restrict secure contexts to
		 * ringbuffer-level preemption.
		 */
		if (drawctxt->base.flags & KGSL_CONTEXT_SECURE)
			preempt_style = KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER;
		else
			preempt_style = FIELD_GET(KGSL_CONTEXT_PREEMPT_STYLE_MASK,
				drawctxt->base.flags);
	}

	/*
	 * CP_PREEMPT_ENABLE_GLOBAL (global preemption) can only be set by KMD
	 * in the ringbuffer:
	 * 1) Set global preemption to 0x0 to disable global preemption.
	 *    Only RB-level preemption is allowed in this mode.
	 * 2) Set global preemption to defer (0x2) for finegrain preemption.
	 *    When global preemption is set to defer (0x2),
	 *    CP_PREEMPT_ENABLE_LOCAL (local preemption) determines the
	 *    preemption point. Local preemption can be enabled by both UMD
	 *    (within an IB) and KMD.
	 */
	*cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
	*cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN)
		? 2 : 0);

	/* Turn CP protection OFF */
	cmds += cp_protected_mode(adreno_dev, cmds, 0);

	/*
	 * During a context switch the CP saves the context switch info to the
	 * a5xx_cp_preemption_record pointed to by CONTEXT_SWITCH_SAVE_ADDR
	 */
	*cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
	*cmds++ = lower_32_bits(gpuaddr);
	*cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
	*cmds++ = upper_32_bits(gpuaddr);

	/* Turn CP protection ON */
	cmds += cp_protected_mode(adreno_dev, cmds, 1);

	/*
	 * Enable local preemption for finegrain preemption in case of
	 * a misbehaving IB
	 */
	if (preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN) {
		*cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
		*cmds++ = 1;
	} else {
		*cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
		*cmds++ = 0;
	}

	/* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */
	*cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
	*cmds++ = 2;

	return (unsigned int) (cmds - cmds_orig);
}
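
/*
 * Emit the CP_CONTEXT_SWITCH_YIELD packet that follows an IB submission,
 * requesting an interrupt on preemption completion. Returns the number of
 * dwords written.
 */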
unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
		unsigned int *cmds)
{
	int dwords = 0;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return 0;

	cmds[dwords++] = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
	/* Write NULL to the address to skip the data write */
	dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], 0x0);
	cmds[dwords++] = 1;
	/* generate interrupt on preemption completion */
	cmds[dwords++] = 1;

	return dwords;
}
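
/*
 * Reset the preemption state and reprogram the SMMU info block and every
 * ringbuffer's context record with default values and the default pagetable.
 */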
void a5xx_preemption_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
	struct adreno_ringbuffer *rb;
	unsigned int i;

	if (!adreno_is_preemption_enabled(adreno_dev))
		return;

	/* Force the state to be clear */
	adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);

	/* Only set up smmu info when per-process pagetables are enabled */
	if (kgsl_mmu_is_perprocess(&device->mmu)) {
		/* smmu_info is allocated and mapped in a5xx_preemption_iommu_init */
		kgsl_sharedmem_writel(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(magic), A5XX_CP_SMMU_INFO_MAGIC_REF);
		kgsl_sharedmem_writeq(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));

		/* The CP doesn't use the asid record, so poison it */
		kgsl_sharedmem_writel(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
		kgsl_sharedmem_writel(iommu->smmu_info,
			PREEMPT_SMMU_RECORD(context_idr), 0);

		kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
			lower_32_bits(iommu->smmu_info->gpuaddr));
		kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
			upper_32_bits(iommu->smmu_info->gpuaddr));
	}

	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		/*
		 * preemption_desc is allocated and mapped at init time,
		 * so no need to check sharedmem_writel return value
		 */
		kgsl_sharedmem_writel(rb->preemption_desc,
			PREEMPT_RECORD(rptr), 0);
		kgsl_sharedmem_writel(rb->preemption_desc,
			PREEMPT_RECORD(wptr), 0);

		adreno_ringbuffer_set_pagetable(device, rb,
			device->mmu.defaultpagetable);
	}
}
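
/*
 * Allocate (if needed) and initialize the per-ringbuffer preemption context
 * record that the CP saves to and restores from during a switch.
 */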
static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
		struct adreno_ringbuffer *rb, uint64_t counteraddr)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

	if (IS_ERR_OR_NULL(rb->preemption_desc))
		rb->preemption_desc = kgsl_allocate_global(device,
			A5XX_CP_CTXRECORD_SIZE_IN_BYTES, SZ_16K, 0,
			KGSL_MEMDESC_PRIVILEGED, "preemption_desc");

	if (IS_ERR(rb->preemption_desc))
		return PTR_ERR(rb->preemption_desc);

	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(magic), A5XX_CP_CTXRECORD_MAGIC_REF);
	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(info), 0);
	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(data), 0);
	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(cntl), A5XX_CP_RB_CNTL_DEFAULT);
	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(rptr), 0);
	kgsl_sharedmem_writel(rb->preemption_desc,
		PREEMPT_RECORD(wptr), 0);
	kgsl_sharedmem_writeq(rb->preemption_desc,
		PREEMPT_RECORD(rptr_addr), SCRATCH_RB_GPU_ADDR(device,
		rb->id, rptr));
	kgsl_sharedmem_writeq(rb->preemption_desc,
		PREEMPT_RECORD(rbase), rb->buffer_desc->gpuaddr);
	kgsl_sharedmem_writeq(rb->preemption_desc,
		PREEMPT_RECORD(counter), counteraddr);

	return 0;
}
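
/*
 * One-time preemption setup: allocate the preemption counter scratch buffer,
 * a context record per ringbuffer and (for per-process pagetables) the SMMU
 * info block, then mark preemption as available on the device.
 */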
int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_iommu *iommu = KGSL_IOMMU(device);
	struct adreno_preemption *preempt = &adreno_dev->preempt;
	struct adreno_ringbuffer *rb;
	int ret;
	unsigned int i;
	uint64_t addr;

	/* We are dependent on IOMMU to make preemption go on the CP side */
	if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
		return -ENODEV;

	INIT_WORK(&preempt->work, _a5xx_preemption_worker);

	/* Allocate mem for storing preemption counters */
	if (IS_ERR_OR_NULL(preempt->scratch))
		preempt->scratch = kgsl_allocate_global(device,
			adreno_dev->num_ringbuffers *
			A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, 0,
			"preemption_counters");

	ret = PTR_ERR_OR_ZERO(preempt->scratch);
	if (ret)
		return ret;

	addr = preempt->scratch->gpuaddr;

	/* Allocate mem for storing preemption switch record */
	FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
		ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
		if (ret)
			return ret;

		addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
	}

	/* Allocate mem for storing preemption smmu record */
	if (kgsl_mmu_is_perprocess(&device->mmu) && IS_ERR_OR_NULL(iommu->smmu_info))
		iommu->smmu_info = kgsl_allocate_global(device, PAGE_SIZE, 0,
			KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
			"smmu_info");

	if (IS_ERR(iommu->smmu_info))
		return PTR_ERR(iommu->smmu_info);

	set_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);

	return 0;
}