adreno_drawctxt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/debugfs.h>

#include "adreno.h"
#include "adreno_trace.h"

static void wait_callback(struct kgsl_device *device,
                struct kgsl_event_group *group, void *priv, int result)
{
        struct adreno_context *drawctxt = priv;

        wake_up_all(&drawctxt->waiting);
}
static int _check_context_timestamp(struct kgsl_device *device,
                struct kgsl_context *context, unsigned int timestamp)
{
        /* Bail if the drawctxt has been invalidated or destroyed */
        if (kgsl_context_is_bad(context))
                return 1;

        return kgsl_check_timestamp(device, context, timestamp);
}
/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
                struct kgsl_context *context)
{
        unsigned int queue, start, retire;
        struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
        int index, pos;
        char buf[120];

        kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queue);
        kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED, &start);
        kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
        /*
         * The kgsl sync obj timer may be running, and it uses this same
         * lock, so take the lock with software interrupts disabled (bh)
         * to avoid spinlock recursion.
         *
         * Use spin_trylock because the dispatcher can acquire
         * drawctxt->lock if a context is pending and the fence it is
         * waiting on just got signalled. The dispatcher acquires
         * drawctxt->lock and tries to delete the sync obj timer using
         * del_timer_sync(). del_timer_sync() waits until the timer and
         * its pending handlers are deleted. But if the timer expires at
         * the same time, the timer handler could be waiting on
         * drawctxt->lock, leading to a deadlock. To prevent this, use
         * spin_trylock_bh.
         */
        if (!spin_trylock_bh(&drawctxt->lock)) {
                dev_err(device->dev, " context[%u]: could not get lock\n",
                        context->id);
                return;
        }

        dev_err(device->dev,
                " context[%u]: queue=%u, submit=%u, start=%u, retire=%u\n",
                context->id, queue, drawctxt->submitted_timestamp,
                start, retire);

        if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
                struct kgsl_drawobj *drawobj =
                        drawctxt->drawqueue[drawctxt->drawqueue_head];

                if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
                        dev_err(device->dev,
                                " possible deadlock. Context %u might be blocked for itself\n",
                                context->id);
                        goto stats;
                }

                if (!kref_get_unless_zero(&drawobj->refcount))
                        goto stats;

                if (drawobj->type == SYNCOBJ_TYPE) {
                        struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

                        if (kgsl_drawobj_events_pending(syncobj)) {
                                dev_err(device->dev,
                                        " context[%u] (ts=%u) Active sync points:\n",
                                        context->id, drawobj->timestamp);

                                kgsl_dump_syncpoints(device, syncobj);
                        }
                }

                kgsl_drawobj_put(drawobj);
        }

stats:
        memset(buf, 0, sizeof(buf));

        pos = 0;

        for (index = 0; index < SUBMIT_RETIRE_TICKS_SIZE; index++) {
                uint64_t msecs;
                unsigned int usecs;

                if (!drawctxt->submit_retire_ticks[index])
                        continue;
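                /*
                 * submit_retire_ticks are presumably 19.2 MHz always-on
                 * counter ticks: ticks * 10 / 192 yields microseconds,
                 * which is then split into msecs and the usec remainder
                 * for printing.
                 */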
                msecs = drawctxt->submit_retire_ticks[index] * 10;
                usecs = do_div(msecs, 192);
                usecs = do_div(msecs, 1000);

                pos += scnprintf(buf + pos, sizeof(buf) - pos, "%u.%0u ",
                        (unsigned int)msecs, usecs);
        }

        dev_err(device->dev, " context[%u]: submit times: %s\n",
                context->id, buf);

        spin_unlock_bh(&drawctxt->lock);
}
/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: Pointer to the KGSL context to sleep for
 * @timestamp: Timestamp to wait on
 * @timeout: Number of milliseconds to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has passed. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
 * on success
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
                struct kgsl_context *context,
                uint32_t timestamp, unsigned int timeout)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
        struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
        int ret;
        long ret_temp;

        if (kgsl_context_detached(context))
                return -ENOENT;

        if (kgsl_context_invalid(context))
                return -EDEADLK;

        trace_adreno_drawctxt_wait_start(-1, context->id, timestamp);

        ret = kgsl_add_event(device, &context->events, timestamp,
                wait_callback, (void *) drawctxt);
        if (ret)
                goto done;

        /*
         * If timeout is 0, wait forever. msecs_to_jiffies will force
         * values larger than INT_MAX to an infinite timeout.
         */
        if (timeout == 0)
                timeout = UINT_MAX;

        ret_temp = wait_event_interruptible_timeout(drawctxt->waiting,
                _check_context_timestamp(device, context, timestamp),
                msecs_to_jiffies(timeout));

        if (ret_temp <= 0) {
                kgsl_cancel_event(device, &context->events, timestamp,
                        wait_callback, (void *)drawctxt);

                ret = ret_temp ? (int)ret_temp : -ETIMEDOUT;
                goto done;
        }

        ret = 0;

        /* -EDEADLK if the context was invalidated while we were waiting */
        if (kgsl_context_invalid(context))
                ret = -EDEADLK;

        /* Return -ENOENT if the context was detached while we were waiting */
        if (kgsl_context_detached(context))
                ret = -ENOENT;

done:
        trace_adreno_drawctxt_wait_done(-1, context->id, timestamp, ret);
        return ret;
}
/**
 * adreno_drawctxt_wait_rb() - Wait for the last RB timestamp at which this
 * context submitted a command to the corresponding RB
 * @adreno_dev: The device on which the timestamp is active
 * @context: The context which submitted commands to the RB
 * @timestamp: The RB timestamp of the last command submitted to the RB by
 * the context
 * @timeout: Timeout value for the wait
 *
 * Caller must hold the device mutex
 */
static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
                struct kgsl_context *context,
                uint32_t timestamp, unsigned int timeout)
{
        struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
        int ret = 0;

        /*
         * If the context is invalid or has not submitted commands to the
         * GPU, then return immediately - we may end up waiting for a
         * timestamp that will never come
         */
        if (kgsl_context_invalid(context) ||
                        !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
                goto done;

        trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
                timestamp);

        ret = adreno_ringbuffer_waittimestamp(drawctxt->rb, timestamp, timeout);
done:
        trace_adreno_drawctxt_wait_done(drawctxt->rb->id, context->id,
                timestamp, ret);
        return ret;
}
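/*
 * Drain every queued drawobj off the context's drawqueue ring into @list
 * and return how many were removed. Callers must hold drawctxt->lock; the
 * detached objects are destroyed by the caller after the lock is dropped.
 */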
static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
                struct kgsl_drawobj **list)
{
        int count = 0;

        while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
                struct kgsl_drawobj *drawobj =
                        drawctxt->drawqueue[drawctxt->drawqueue_head];

                drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
                        ADRENO_CONTEXT_DRAWQUEUE_SIZE;

                list[count++] = drawobj;
        }

        return count;
}
/**
 * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
 * @device: Pointer to the KGSL device structure for the GPU
 * @context: Pointer to the KGSL context structure
 *
 * Invalidate the context, remove all queued commands, and cancel any pending
 * waiters
 */
void adreno_drawctxt_invalidate(struct kgsl_device *device,
                struct kgsl_context *context)
{
        struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
        struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
        int i, count;

        trace_adreno_drawctxt_invalidate(drawctxt);

        spin_lock(&drawctxt->lock);
        set_bit(KGSL_CONTEXT_PRIV_INVALID, &context->priv);

        /*
         * Set the timestamp to the last value since the context is invalidated
         * and we want the pending events for this context to go away
         */
        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
                drawctxt->timestamp);

        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
                drawctxt->timestamp);

        /* Get rid of commands still waiting in the queue */
        count = drawctxt_detach_drawobjs(drawctxt, list);
        spin_unlock(&drawctxt->lock);

        for (i = 0; i < count; i++) {
                kgsl_cancel_events_timestamp(device, &context->events,
                        list[i]->timestamp);
                kgsl_drawobj_destroy(list[i]);
        }

        /* Make sure all pending events are processed or cancelled */
        kgsl_flush_event_group(device, &context->events);

        /* Give the bad news to everybody waiting around */
        wake_up_all(&drawctxt->waiting);
        wake_up_all(&drawctxt->wq);
        wake_up_all(&drawctxt->timeout);
}
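/*
 * Mark the context as the guilty party for a GPU reset so userspace can
 * query the reset status, then invalidate it to flush its queued commands
 * and wake any waiters.
 */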
void adreno_drawctxt_set_guilty(struct kgsl_device *device,
                struct kgsl_context *context)
{
        if (!context)
                return;

        context->reset_status = KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;

        adreno_drawctxt_invalidate(device, context);
}
#define KGSL_CONTEXT_PRIORITY_MED	0x8

/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space)
 *
 * Create and return a new draw context for the 3D core.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
                uint32_t *flags)
{
        struct adreno_context *drawctxt;
        struct kgsl_device *device = dev_priv->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
        int ret;
        unsigned int local;

        local = *flags & (KGSL_CONTEXT_PREAMBLE |
                KGSL_CONTEXT_NO_GMEM_ALLOC |
                KGSL_CONTEXT_PER_CONTEXT_TS |
                KGSL_CONTEXT_USER_GENERATED_TS |
                KGSL_CONTEXT_NO_FAULT_TOLERANCE |
                KGSL_CONTEXT_INVALIDATE_ON_FAULT |
                KGSL_CONTEXT_CTX_SWITCH |
                KGSL_CONTEXT_PRIORITY_MASK |
                KGSL_CONTEXT_TYPE_MASK |
                KGSL_CONTEXT_PWR_CONSTRAINT |
                KGSL_CONTEXT_IFH_NOP |
                KGSL_CONTEXT_SECURE |
                KGSL_CONTEXT_PREEMPT_STYLE_MASK |
                KGSL_CONTEXT_LPAC |
                KGSL_CONTEXT_NO_SNAPSHOT |
                KGSL_CONTEXT_FAULT_INFO);
        /* Check for errors before trying to initialize */

        /* If preemption is not supported, ignore preemption request */
        if (!adreno_preemption_feature_set(adreno_dev))
                local &= ~KGSL_CONTEXT_PREEMPT_STYLE_MASK;

        /* We no longer support legacy context switching */
        if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
                (local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
                dev_err_once(device->dev,
                        "legacy context switch not supported\n");
                return ERR_PTR(-EINVAL);
        }

        /* Make sure that our target can support secure contexts if requested */
        if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
                        (local & KGSL_CONTEXT_SECURE)) {
                dev_err_once(device->dev, "Secure context not supported\n");
                return ERR_PTR(-EOPNOTSUPP);
        }

        if ((local & KGSL_CONTEXT_LPAC) && !adreno_dev->lpac_enabled) {
                dev_err_once(device->dev, "LPAC context not supported\n");
                return ERR_PTR(-EOPNOTSUPP);
        }

        if ((local & KGSL_CONTEXT_LPAC) && (local & KGSL_CONTEXT_SECURE)) {
                dev_err_once(device->dev, "LPAC secure context not supported\n");
                return ERR_PTR(-EOPNOTSUPP);
        }

        drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
        if (drawctxt == NULL)
                return ERR_PTR(-ENOMEM);

        drawctxt->timestamp = 0;

        drawctxt->base.flags = local;

        /* Always enable per-context timestamps */
        drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
        drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
                >> KGSL_CONTEXT_TYPE_SHIFT;
        spin_lock_init(&drawctxt->lock);
        init_waitqueue_head(&drawctxt->wq);
        init_waitqueue_head(&drawctxt->waiting);
        init_waitqueue_head(&drawctxt->timeout);

        /* If the priority is not set by user, set it for them */
        if ((drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) ==
                        KGSL_CONTEXT_PRIORITY_UNDEF)
                drawctxt->base.flags |= (KGSL_CONTEXT_PRIORITY_MED <<
                        KGSL_CONTEXT_PRIORITY_SHIFT);

        /* Store the context priority */
        drawctxt->base.priority =
                (drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) >>
                KGSL_CONTEXT_PRIORITY_SHIFT;
        /*
         * Now initialize the common part of the context. This allocates the
         * context id, after which another thread could look the context up.
         * So we want all of our initialization that doesn't require the
         * context id to be done before this call.
         */
        ret = kgsl_context_init(dev_priv, &drawctxt->base);
        if (ret != 0) {
                kfree(drawctxt);
                return ERR_PTR(ret);
        }
        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
                0);
        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
                0);

        adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);

        INIT_LIST_HEAD(&drawctxt->active_node);
        INIT_LIST_HEAD(&drawctxt->hw_fence_list);
        INIT_LIST_HEAD(&drawctxt->hw_fence_inflight_list);

        if (adreno_dev->dispatch_ops && adreno_dev->dispatch_ops->setup_context)
                adreno_dev->dispatch_ops->setup_context(adreno_dev, drawctxt);

        if (gpudev->preemption_context_init) {
                ret = gpudev->preemption_context_init(&drawctxt->base);
                if (ret != 0) {
                        kgsl_context_detach(&drawctxt->base);
                        return ERR_PTR(ret);
                }
        }
        /* Copy back whatever flags we decided were valid */
        *flags = drawctxt->base.flags;
        return &drawctxt->base;
}
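/*
 * Wait for the context's last ringbuffer timestamp to retire before tearing
 * the context down. If the wait times out, flag a context detach timeout
 * fault and let the dispatcher run GPU recovery instead.
 */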
static void wait_for_timestamp_rb(struct kgsl_device *device,
                struct adreno_context *drawctxt)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        struct kgsl_context *context = &drawctxt->base;
        int ret;

        /*
         * internal_timestamp is set in adreno_ringbuffer_addcmds,
         * which holds the device mutex.
         */
        mutex_lock(&device->mutex);
        /*
         * Wait for the last global timestamp to pass before continuing.
         * The maximum wait time is 30s; some large IBs can take longer
         * than 10s, and if a hang happens the context's commands will take
         * even longer to retire. 30s should be sufficient time to wait
         * for the commands even if a hang happens.
         */
        ret = adreno_drawctxt_wait_rb(adreno_dev, &drawctxt->base,
                drawctxt->internal_timestamp, 30 * 1000);

        /*
         * If the wait for global fails due to timeout then mark it as a
         * context detach timeout fault and schedule the dispatcher to kick
         * in GPU recovery. For an ADRENO_CTX_DETATCH_TIMEOUT_FAULT we clear
         * the policy and invalidate the context. If -EAGAIN is returned
         * then recovery will kick in and there will be no more commands in
         * the RB pipe from this context, which is what we are waiting for,
         * so ignore -EAGAIN.
         */
        if (ret && ret != -EAGAIN) {
                dev_err(device->dev,
                        "Wait for global ctx=%u ts=%u type=%d error=%d\n",
                        drawctxt->base.id, drawctxt->internal_timestamp,
                        drawctxt->type, ret);

                adreno_set_gpu_fault(adreno_dev,
                        ADRENO_CTX_DETATCH_TIMEOUT_FAULT);
                mutex_unlock(&device->mutex);

                /* Schedule the dispatcher to kick in recovery */
                adreno_dispatcher_schedule(device);

                /* Wait for the context to be invalidated and released */
                wait_event_interruptible_timeout(drawctxt->timeout,
                        kgsl_context_invalid(&drawctxt->base),
                        msecs_to_jiffies(5000));
                return;
        }

        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
                drawctxt->timestamp);

        kgsl_sharedmem_writel(device->memstore,
                KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
                drawctxt->timestamp);

        adreno_profile_process_results(adreno_dev);

        mutex_unlock(&device->mutex);
}
void adreno_drawctxt_detach(struct kgsl_context *context)
{
        struct kgsl_device *device;
        struct adreno_device *adreno_dev;
        const struct adreno_gpudev *gpudev;
        struct adreno_context *drawctxt;
        int count, i;
        struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];

        if (context == NULL)
                return;

        device = context->device;
        adreno_dev = ADRENO_DEVICE(device);
        gpudev = ADRENO_GPU_DEVICE(adreno_dev);
        drawctxt = ADRENO_CONTEXT(context);

        spin_lock(&drawctxt->lock);

        spin_lock(&adreno_dev->active_list_lock);
        list_del_init(&drawctxt->active_node);
        spin_unlock(&adreno_dev->active_list_lock);

        count = drawctxt_detach_drawobjs(drawctxt, list);
        spin_unlock(&drawctxt->lock);

        for (i = 0; i < count; i++) {
                /*
                 * If the context is detached while we are waiting for
                 * the next command in GFT SKIP CMD, print the context
                 * detached status here.
                 */
                adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
                kgsl_drawobj_destroy(list[i]);
        }

        debugfs_remove_recursive(drawctxt->debug_root);

        /* The debugfs file has a reference, release it */
        if (drawctxt->debug_root)
                kgsl_context_put(context);

        if (gpudev->context_detach)
                gpudev->context_detach(drawctxt);
        else
                wait_for_timestamp_rb(device, drawctxt);

        if (context->user_ctxt_record) {
                gpumem_free_entry(context->user_ctxt_record);

                /* Put the extra ref from gpumem_alloc_entry() */
                kgsl_mem_entry_put(context->user_ctxt_record);
        }

        /* Wake threads waiting to submit commands from this context */
        wake_up_all(&drawctxt->waiting);
        wake_up_all(&drawctxt->wq);
}
void adreno_drawctxt_destroy(struct kgsl_context *context)
{
        struct adreno_context *drawctxt;
        struct adreno_device *adreno_dev;
        const struct adreno_gpudev *gpudev;

        if (context == NULL)
                return;

        drawctxt = ADRENO_CONTEXT(context);
        adreno_dev = ADRENO_DEVICE(context->device);
        gpudev = ADRENO_GPU_DEVICE(adreno_dev);

        if (gpudev->context_destroy)
                gpudev->context_destroy(adreno_dev, drawctxt);

        kfree(drawctxt);
}
static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
                struct kgsl_event_group *group,
                void *priv, int result)
{
        struct adreno_context *drawctxt = (struct adreno_context *) priv;

        kgsl_context_put(&drawctxt->base);
}
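/*
 * Release the reference held on an outgoing context once the given RB
 * timestamp retires. If the event cannot be registered, drop the reference
 * immediately so it is not leaked.
 */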
void adreno_put_drawctxt_on_timestamp(struct kgsl_device *device,
                struct adreno_context *drawctxt,
                struct adreno_ringbuffer *rb, u32 timestamp)
{
        if (!drawctxt)
                return;

        if (kgsl_add_event(device, &rb->events, timestamp,
                _drawctxt_switch_wait_callback, drawctxt))
                kgsl_context_put(&drawctxt->base);
}
static void _add_context(struct adreno_device *adreno_dev,
                struct adreno_context *drawctxt)
{
        /* Remove it from the list */
        list_del_init(&drawctxt->active_node);

        /* And push it to the front */
        drawctxt->active_time = jiffies;
        list_add(&drawctxt->active_node, &adreno_dev->active_list);
}
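/*
 * A context counts as active if it was used within the last 100 ms. Since
 * _add_context() keeps the active list in most-recently-used order, the
 * walk in _adreno_count_active_contexts() can stop at the first expired
 * entry.
 */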
static int __count_context(struct adreno_context *drawctxt, void *data)
{
        unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);

        return time_after(jiffies, expires) ? 0 : 1;
}

static int __count_drawqueue_context(struct adreno_context *drawctxt,
                void *data)
{
        unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);

        if (time_after(jiffies, expires))
                return 0;

        return (&drawctxt->rb->dispatch_q ==
                (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}

static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
                int (*func)(struct adreno_context *, void *), void *data)
{
        struct adreno_context *ctxt;
        int count = 0;

        list_for_each_entry(ctxt, &adreno_dev->active_list, active_node) {
                if (func(ctxt, data) == 0)
                        return count;

                count++;
        }

        return count;
}
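/*
 * Move the context to the front of the active list and refresh the active
 * context count for the device and, when a drawqueue is given, for that
 * drawqueue as well.
 */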
void adreno_track_context(struct adreno_device *adreno_dev,
                struct adreno_dispatcher_drawqueue *drawqueue,
                struct adreno_context *drawctxt)
{
        struct kgsl_device *device = KGSL_DEVICE(adreno_dev);

        spin_lock(&adreno_dev->active_list_lock);

        _add_context(adreno_dev, drawctxt);

        device->active_context_count =
                _adreno_count_active_contexts(adreno_dev,
                        __count_context, NULL);

        if (drawqueue)
                drawqueue->active_context_count =
                        _adreno_count_active_contexts(adreno_dev,
                                __count_drawqueue_context, drawqueue);

        spin_unlock(&adreno_dev->active_list_lock);
}