msm_submitqueue.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"
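
/*
 * Adjust the per-file sysprof setting: a value of 1 takes a reference on
 * gpu->sysprof_active, a value of 2 additionally holds a pm_runtime
 * reference so the GPU stays powered while profiling, and 0 releases
 * whatever the previous value held.
 */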
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
				 struct msm_gpu *gpu, int sysprof)
{
	/*
	 * Since pm_runtime and sysprof_active are both refcounts, we
	 * apply the new value first, and then unwind the previous
	 * value
	 */

	switch (sysprof) {
	default:
		return -EINVAL;
	case 2:
		pm_runtime_get_sync(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_inc(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	/* unwind old value: */
	switch (ctx->sysprof) {
	case 2:
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
		fallthrough;
	case 1:
		refcount_dec(&gpu->sysprof_active);
		fallthrough;
	case 0:
		break;
	}

	ctx->sysprof = sysprof;

	return 0;
}
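
/*
 * Final kref release for a file private: destroy and free any lazily
 * created scheduler entities, drop the address space reference, and
 * free the context itself.
 */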
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx->comm);
	kfree(ctx->cmdline);
	kfree(ctx);
}
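
/*
 * Final kref release for a submitqueue: tear down the fence idr and drop
 * the reference the queue holds on its owning file private.
 */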
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}
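
/*
 * Look up a submitqueue by id under the queuelock, taking a reference that
 * the caller is expected to drop with msm_submitqueue_put().
 */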
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}
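
/*
 * Scheduler entities are created lazily, on first use, and are shared by
 * all submitqueues in the file private that map to the same ring and
 * scheduler priority (idx = ring_nr * NR_SCHED_PRIORITIES + sched_prio).
 */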
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}
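
/*
 * Create a new submitqueue for the given file private.  The requested
 * userspace priority is converted into a (ring, scheduler priority) pair,
 * and the new queue id is returned through @id when non-NULL.
 */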
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	spin_lock_init(&queue->idr_lock);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default.  Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
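
/*
 * Copy queue->faults out to userspace.  A zero args->len is the "query
 * size" convention: report the size userspace should allocate and return.
 */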
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}
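
/*
 * Handle a submitqueue query; only the FAULTS param is currently supported.
 */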
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}
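
/*
 * Remove the submitqueue with the given id from the file private's list
 * and drop the list's reference to it.  The default queue (id 0) cannot
 * be removed.
 */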
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}