  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2002,2007-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/slab.h>
  7. #include "adreno.h"
  8. #include "adreno_a5xx.h"
  9. /*
  10. * Add a perfcounter to the per-fd list.
  11. * Call with the device mutex held
  12. */
  13. static int adreno_process_perfcounter_add(struct kgsl_device_private *dev_priv,
  14. unsigned int groupid, unsigned int countable)
  15. {
  16. struct adreno_device_private *adreno_priv = container_of(dev_priv,
  17. struct adreno_device_private, dev_priv);
  18. struct adreno_perfcounter_list_node *perfctr;
  19. perfctr = kmalloc(sizeof(*perfctr), GFP_KERNEL);
  20. if (!perfctr)
  21. return -ENOMEM;
  22. perfctr->groupid = groupid;
  23. perfctr->countable = countable;
  24. /* add the pair to process perfcounter list */
  25. list_add(&perfctr->node, &adreno_priv->perfcounter_list);
  26. return 0;
  27. }
  28. /*
  29. * Remove a perfcounter from the per-fd list.
  30. * Call with the device mutex held
  31. */
  32. static int adreno_process_perfcounter_del(struct kgsl_device_private *dev_priv,
  33. unsigned int groupid, unsigned int countable)
  34. {
  35. struct adreno_device_private *adreno_priv = container_of(dev_priv,
  36. struct adreno_device_private, dev_priv);
  37. struct adreno_perfcounter_list_node *p;
  38. list_for_each_entry(p, &adreno_priv->perfcounter_list, node) {
  39. if (p->groupid == groupid && p->countable == countable) {
  40. list_del(&p->node);
  41. kfree(p);
  42. return 0;
  43. }
  44. }
  45. return -ENODEV;
  46. }
  47. long adreno_ioctl_perfcounter_get(struct kgsl_device_private *dev_priv,
  48. unsigned int cmd, void *data)
  49. {
  50. struct kgsl_device *device = dev_priv->device;
  51. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  52. struct kgsl_perfcounter_get *get = data;
  53. int result;
  54. mutex_lock(&device->mutex);
  55. /*
  56. * adreno_perfcounter_get() is called by kernel clients
  57. * during start(), so it is not safe to take an
  58. * active count inside that function.
  59. */
  60. result = adreno_perfcntr_active_oob_get(adreno_dev);
  61. if (result) {
  62. mutex_unlock(&device->mutex);
  63. return (long)result;
  64. }
  65. result = adreno_perfcounter_get(adreno_dev,
  66. get->groupid, get->countable, &get->offset,
  67. &get->offset_hi, PERFCOUNTER_FLAG_NONE);
  68. /* Add the perfcounter into the list */
  69. if (!result) {
  70. result = adreno_process_perfcounter_add(dev_priv, get->groupid,
  71. get->countable);
  72. if (result)
  73. adreno_perfcounter_put(adreno_dev, get->groupid,
  74. get->countable, PERFCOUNTER_FLAG_NONE);
  75. }
  76. adreno_perfcntr_active_oob_put(adreno_dev);
  77. mutex_unlock(&device->mutex);
  78. return (long) result;
  79. }
  80. long adreno_ioctl_perfcounter_put(struct kgsl_device_private *dev_priv,
  81. unsigned int cmd, void *data)
  82. {
  83. struct kgsl_device *device = dev_priv->device;
  84. struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
  85. struct kgsl_perfcounter_put *put = data;
  86. int result;
  87. mutex_lock(&device->mutex);
  88. /* Delete the perfcounter from the process list */
  89. result = adreno_process_perfcounter_del(dev_priv, put->groupid,
  90. put->countable);
  91. /* Put the perfcounter refcount */
  92. if (!result)
  93. adreno_perfcounter_put(adreno_dev, put->groupid,
  94. put->countable, PERFCOUNTER_FLAG_NONE);
  95. mutex_unlock(&device->mutex);
  96. return (long) result;
  97. }
  98. static long adreno_ioctl_perfcounter_query(struct kgsl_device_private *dev_priv,
  99. unsigned int cmd, void *data)
  100. {
  101. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_priv->device);
  102. struct kgsl_perfcounter_query *query = data;
  103. return (long) adreno_perfcounter_query_group(adreno_dev, query->groupid,
  104. query->countables, query->count, &query->max_counters);
  105. }
  106. static long adreno_ioctl_perfcounter_read(struct kgsl_device_private *dev_priv,
  107. unsigned int cmd, void *data)
  108. {
  109. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_priv->device);
  110. struct kgsl_perfcounter_read *read = data;
  111. /*
  112. * When performance counter zapping is enabled, the counters are cleared
  113. * across context switches. Reading the counters when they are zapped is
  114. * not permitted.
  115. */
  116. if (!adreno_dev->perfcounter)
  117. return -EPERM;
  118. return (long) adreno_perfcounter_read_group(adreno_dev, read->reads,
  119. read->count);
  120. }
  121. static long adreno_ioctl_preemption_counters_query(
  122. struct kgsl_device_private *dev_priv,
  123. unsigned int cmd, void *data)
  124. {
  125. struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_priv->device);
  126. struct kgsl_preemption_counters_query *read = data;
  127. int size_level = A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
  128. int levels_to_copy;
  129. if (!adreno_is_a5xx(adreno_dev) ||
  130. !adreno_is_preemption_enabled(adreno_dev))
  131. return -EOPNOTSUPP;
  132. if (read->size_user < size_level)
  133. return -EINVAL;
  134. /* Calculate number of preemption counter levels to copy to userspace */
  135. levels_to_copy = (read->size_user / size_level);
  136. levels_to_copy = min_t(int, levels_to_copy,
  137. ARRAY_SIZE(adreno_dev->ringbuffers));
  138. if (copy_to_user(u64_to_user_ptr(read->counters),
  139. adreno_dev->preempt.scratch->hostptr,
  140. levels_to_copy * size_level))
  141. return -EFAULT;
  142. read->max_priority_level = levels_to_copy;
  143. read->size_priority_level = size_level;
  144. return 0;
  145. }
/*
 * adreno_ioctl_read_calibrated_ts() - Sample correlated timestamps from
 * multiple time domains (GPU CX timer, CLOCK_MONOTONIC, CLOCK_MONOTONIC_RAW)
 * in one tight, irq-disabled window so userspace can calibrate between them.
 *
 * Userspace supplies an array of domain ids (reads->sources) and receives
 * one sampled timestamp per requested domain (reads->ts), plus a deviation
 * bound (reads->deviation) measured on the MONOTONIC_RAW domain.
 *
 * Return: 0 on success, -EOPNOTSUPP if the CX timer is not initialized or
 * a domain id is out of range (-EINVAL), -ENOMEM/-EFAULT on alloc/copy
 * failures.
 */
static long adreno_ioctl_read_calibrated_ts(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(dev_priv->device);
	struct kgsl_read_calibrated_timestamps *reads = data;
	unsigned long flags;
	u32 *sources = NULL;
	u64 *ts = NULL;
	u64 start;
	/* One sample slot per supported time domain */
	u64 samples[KGSL_CALIBRATED_TIME_DOMAIN_MAX] = {0};
	u32 i;
	int ret = 0;

	/* Reading calibrated timestamps requires the CX timer be initialized */
	if (!test_bit(ADRENO_DEVICE_CX_TIMER_INITIALIZED, &adreno_dev->priv))
		return -EOPNOTSUPP;

	/*
	 * Check that the number of timestamps is reasonable: duplicates are
	 * allowed, so permit up to two requests per domain.
	 */
	if (!reads->count ||
		(reads->count > (2 * KGSL_CALIBRATED_TIME_DOMAIN_MAX)))
		return -EINVAL;

	sources = kvcalloc(reads->count, sizeof(*sources), GFP_KERNEL);
	if (!sources)
		return -ENOMEM;

	if (copy_from_user(sources, u64_to_user_ptr(reads->sources),
		reads->count * sizeof(*sources))) {
		ret = -EFAULT;
		goto done;
	}

	/* Validate every requested domain id before sampling anything */
	for (i = 0; i < reads->count; i++) {
		if (sources[i] >= KGSL_CALIBRATED_TIME_DOMAIN_MAX) {
			ret = -EINVAL;
			goto done;
		}
	}

	ts = kvcalloc(reads->count, sizeof(*ts), GFP_KERNEL);
	if (!ts) {
		ret = -ENOMEM;
		goto done;
	}

	/* Disable local irqs to prevent context switch delays */
	local_irq_save(flags);

	/* Sample the MONOTONIC_RAW domain for use in calculating deviation */
	start = (u64)ktime_to_ns(ktime_get_raw());

	/*
	 * The three domain samples below are taken back-to-back inside the
	 * irq-off window; their ordering determines the calibration skew.
	 */
	samples[KGSL_CALIBRATED_TIME_DOMAIN_DEVICE] =
		adreno_read_cx_timer(adreno_dev);
	samples[KGSL_CALIBRATED_TIME_DOMAIN_MONOTONIC] =
		(u64)ktime_to_ns(ktime_get());
	samples[KGSL_CALIBRATED_TIME_DOMAIN_MONOTONIC_RAW] =
		(u64)ktime_to_ns(ktime_get_raw());

	/* Done collecting timestamps. Re-enable irqs */
	local_irq_restore(flags);

	/* Calculate deviation in reads based on the MONOTONIC_RAW samples */
	reads->deviation = samples[KGSL_CALIBRATED_TIME_DOMAIN_MONOTONIC_RAW] - start;

	/* Scatter the sampled domains into the caller's requested order */
	for (i = 0; i < reads->count; i++)
		ts[i] = samples[sources[i]];

	if (copy_to_user(u64_to_user_ptr(reads->ts), ts, reads->count * sizeof(*ts)))
		ret = -EFAULT;

done:
	kvfree(ts);
	kvfree(sources);

	return ret;
}
  207. long adreno_ioctl_helper(struct kgsl_device_private *dev_priv,
  208. unsigned int cmd, unsigned long arg,
  209. const struct kgsl_ioctl *cmds, int len)
  210. {
  211. unsigned char data[128] = { 0 };
  212. long ret;
  213. int i;
  214. for (i = 0; i < len; i++) {
  215. if (_IOC_NR(cmd) == _IOC_NR(cmds[i].cmd))
  216. break;
  217. }
  218. if (i == len)
  219. return -ENOIOCTLCMD;
  220. if (_IOC_SIZE(cmds[i].cmd > sizeof(data))) {
  221. dev_err_ratelimited(dev_priv->device->dev,
  222. "data too big for ioctl 0x%08x: %d/%zu\n",
  223. cmd, _IOC_SIZE(cmds[i].cmd), sizeof(data));
  224. return -EINVAL;
  225. }
  226. if (_IOC_SIZE(cmds[i].cmd)) {
  227. ret = kgsl_ioctl_copy_in(cmds[i].cmd, cmd, arg, data);
  228. if (ret)
  229. return ret;
  230. } else {
  231. memset(data, 0, sizeof(data));
  232. }
  233. ret = cmds[i].func(dev_priv, cmd, data);
  234. if (ret == 0 && _IOC_SIZE(cmds[i].cmd))
  235. ret = kgsl_ioctl_copy_out(cmds[i].cmd, cmd, arg, data);
  236. return ret;
  237. }
  238. static struct kgsl_ioctl adreno_ioctl_funcs[] = {
  239. { IOCTL_KGSL_PERFCOUNTER_GET, adreno_ioctl_perfcounter_get },
  240. { IOCTL_KGSL_PERFCOUNTER_PUT, adreno_ioctl_perfcounter_put },
  241. { IOCTL_KGSL_PERFCOUNTER_QUERY, adreno_ioctl_perfcounter_query },
  242. { IOCTL_KGSL_PERFCOUNTER_READ, adreno_ioctl_perfcounter_read },
  243. { IOCTL_KGSL_PREEMPTIONCOUNTER_QUERY,
  244. adreno_ioctl_preemption_counters_query },
  245. { IOCTL_KGSL_READ_CALIBRATED_TIMESTAMPS, adreno_ioctl_read_calibrated_ts },
  246. };
  247. long adreno_ioctl(struct kgsl_device_private *dev_priv,
  248. unsigned int cmd, unsigned long arg)
  249. {
  250. return adreno_ioctl_helper(dev_priv, cmd, arg,
  251. adreno_ioctl_funcs, ARRAY_SIZE(adreno_ioctl_funcs));
  252. }