/* sde_fence.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include <linux/sync_file.h>
  8. #include <linux/dma-fence.h>
  9. #include <linux/dma-fence-array.h>
  10. #include "msm_drv.h"
  11. #include "sde_kms.h"
  12. #include "sde_fence.h"
  13. #define TIMELINE_VAL_LENGTH 128
  14. #define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10
  15. #define SPEC_FENCE_FLAG_ARRAY_BIND 0x11
  16. void *sde_sync_get(uint64_t fd)
  17. {
  18. /* force signed compare, fdget accepts an int argument */
  19. return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
  20. }
  21. void sde_sync_put(void *fence)
  22. {
  23. if (fence)
  24. dma_fence_put(fence);
  25. }
  26. void sde_fence_dump(struct dma_fence *fence)
  27. {
  28. char timeline_str[TIMELINE_VAL_LENGTH];
  29. if (fence->ops->timeline_value_str)
  30. fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);
  31. SDE_ERROR(
  32. "fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
  33. fence->ops->get_driver_name(fence),
  34. fence->ops->get_timeline_name(fence),
  35. fence->seqno, timeline_str,
  36. fence->ops->signaled ?
  37. fence->ops->signaled(fence) : 0xffffffff,
  38. dma_fence_get_status(fence), fence->flags);
  39. }
  40. static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
  41. {
  42. struct dma_fence_array *array;
  43. struct dma_fence *user_fence;
  44. int i;
  45. array = container_of(base_fence, struct dma_fence_array, base);
  46. if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
  47. test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
  48. for (i = 0; i < array->num_fences; i++) {
  49. user_fence = array->fences[i];
  50. if (user_fence) {
  51. dma_fence_get(user_fence);
  52. sde_fence_dump(user_fence);
  53. dma_fence_put(user_fence);
  54. }
  55. }
  56. }
  57. }
/*
 * sde_sync_wait - interruptibly wait on a fence with a ms timeout
 * @fnc: opaque dma_fence pointer
 * @timeout_ms: timeout in milliseconds
 *
 * Return: remaining jiffies on success, 0 on timeout, or a negative
 * error code; -EBADF is substituted for a spec-fence bind failure.
 *
 * NOTE(review): rc is an int while dma_fence_wait_timeout() returns a
 * signed long — confirm the truncation is acceptable for large timeouts.
 */
signed long sde_sync_wait(void *fnc, long timeout_ms)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		/* already signaled: report success with "time remaining" */
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
		msecs_to_jiffies(timeout_ms));
	/* dump diagnostics on timeout (rc == 0), -EINVAL, or fence error */
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			/* speculative fence: distinguish bind failure from
			 * a fence that signaled with an error status
			 */
			if (status == -EINVAL) {
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	return rc;
}
  86. uint32_t sde_sync_get_name_prefix(void *fence)
  87. {
  88. const char *name;
  89. uint32_t i, prefix;
  90. struct dma_fence *f = fence;
  91. if (!fence)
  92. return 0;
  93. name = f->ops->get_driver_name(f);
  94. if (!name)
  95. return 0;
  96. prefix = 0x0;
  97. for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
  98. prefix = (prefix << CHAR_BIT) | name[i];
  99. return prefix;
  100. }
/**
 * struct sde_fence - release/retire fence structure
 * @base: base dma_fence structure
 * @ctx: fence context this fence belongs to; a kref on it is held for
 *       the fence's lifetime (taken in _sde_fence_create_fd(), dropped
 *       in sde_fence_release())
 * @name: name of each fence - it is fence timeline + commit_count
 * @fence_list: list to associate this fence on timeline/context
 * @fd: fd attached to this fence - debugging purpose.
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
};
  115. static void sde_fence_destroy(struct kref *kref)
  116. {
  117. struct sde_fence_context *ctx;
  118. if (!kref) {
  119. SDE_ERROR("received invalid kref\n");
  120. return;
  121. }
  122. ctx = container_of(kref, struct sde_fence_context, kref);
  123. kfree(ctx);
  124. }
/* Upcast a dma_fence embedded at offset 0 back to its sde_fence wrapper. */
static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}
  129. static const char *sde_fence_get_driver_name(struct dma_fence *fence)
  130. {
  131. struct sde_fence *f = to_sde_fence(fence);
  132. return f->name;
  133. }
  134. static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
  135. {
  136. struct sde_fence *f = to_sde_fence(fence);
  137. return f->ctx->name;
  138. }
/*
 * dma_fence_ops callback: nothing to arm here — signaling is driven by
 * _sde_fence_trigger() walking the context's fence list.
 */
static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
  143. static bool sde_fence_signaled(struct dma_fence *fence)
  144. {
  145. struct sde_fence *f = to_sde_fence(fence);
  146. bool status;
  147. status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
  148. SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
  149. status, fence->seqno, f->ctx->done_count);
  150. return status;
  151. }
  152. static void sde_fence_release(struct dma_fence *fence)
  153. {
  154. struct sde_fence *f;
  155. if (fence) {
  156. f = to_sde_fence(fence);
  157. kref_put(&f->ctx->kref, sde_fence_destroy);
  158. kfree(f);
  159. }
  160. }
  161. static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
  162. {
  163. if (!fence || !str)
  164. return;
  165. snprintf(str, size, "%llu", fence->seqno);
  166. }
  167. static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
  168. int size)
  169. {
  170. struct sde_fence *f = to_sde_fence(fence);
  171. if (!fence || !f->ctx || !str)
  172. return;
  173. snprintf(str, size, "%d", f->ctx->done_count);
  174. }
  175. static struct dma_fence_ops sde_fence_ops = {
  176. .get_driver_name = sde_fence_get_driver_name,
  177. .get_timeline_name = sde_fence_get_timeline_name,
  178. .enable_signaling = sde_fence_enable_signaling,
  179. .signaled = sde_fence_signaled,
  180. .wait = dma_fence_default_wait,
  181. .release = sde_fence_release,
  182. .fence_value_str = sde_fence_value_str,
  183. .timeline_value_str = sde_fence_timeline_value_str,
  184. };
/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: fence context (struct sde_fence_context) to associate with
 * @val: Timeline value at which to signal the fence
 * Return: File descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	/* the fence pins its context; dropped in sde_fence_release() */
	kref_get(&ctx->kref);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		/* drop the init reference; frees fence + context kref */
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* sync_file holds its own fence reference from here on */
	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	/* queue on the context so _sde_fence_trigger() can signal it;
	 * the list keeps the dma_fence_init reference until removal
	 */
	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}
  236. struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
  237. {
  238. struct sde_fence_context *ctx;
  239. if (!name) {
  240. SDE_ERROR("invalid argument(s)\n");
  241. return ERR_PTR(-EINVAL);
  242. }
  243. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  244. if (!ctx) {
  245. SDE_ERROR("failed to alloc fence ctx\n");
  246. return ERR_PTR(-ENOMEM);
  247. }
  248. strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
  249. ctx->drm_id = drm_id;
  250. kref_init(&ctx->kref);
  251. ctx->context = dma_fence_context_alloc(1);
  252. spin_lock_init(&ctx->lock);
  253. spin_lock_init(&ctx->list_lock);
  254. INIT_LIST_HEAD(&ctx->fence_list_head);
  255. return ctx;
  256. }
  257. void sde_fence_deinit(struct sde_fence_context *ctx)
  258. {
  259. if (!ctx) {
  260. SDE_ERROR("invalid fence\n");
  261. return;
  262. }
  263. kref_put(&ctx->kref, sde_fence_destroy);
  264. }
  265. void sde_fence_prepare(struct sde_fence_context *ctx)
  266. {
  267. unsigned long flags;
  268. if (!ctx) {
  269. SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
  270. } else {
  271. spin_lock_irqsave(&ctx->lock, flags);
  272. ++ctx->commit_count;
  273. spin_unlock_irqrestore(&ctx->lock, flags);
  274. }
  275. }
/*
 * _sde_fence_trigger - signal every queued fence that has become due
 * @ctx: fence context whose list is walked
 * @error: when true, mark each fence with -EBUSY before signaling
 * @ts: timestamp recorded on signaled fences
 */
static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;

	/* pin the context: dropping the last fence below could otherwise
	 * free it while we still hold its locks
	 */
	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		/* ctx->lock serializes against done_count updates and is
		 * the lock dma_fence_init() registered for this fence
		 */
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);
		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			/* drop the list's reference; may free the fence */
			list_del_init(&fc->fence_list);
			dma_fence_put(&fc->base);
		}
	}
end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}
/*
 * sde_fence_create - create a fence fd for the current commit
 * @ctx: fence context
 * @val: out parameter receiving the fd (or negative error) as u64
 * @offset: constant offset added to commit_count for the trigger value
 *
 * Return: 0 on success, negative error code on failure.
 */
int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
		uint32_t offset)
{
	uint32_t trigger_value;
	int fd, rc = -EINVAL;
	unsigned long flags;

	if (!ctx || !val) {
		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
			ctx != NULL, val != NULL);
		return rc;
	}

	/*
	 * Allow created fences to have a constant offset with respect
	 * to the timeline. This allows us to delay the fence signalling
	 * w.r.t. the commit completion (e.g., an offset of +1 would
	 * cause fences returned during a particular commit to signal
	 * after an additional delay of one commit, rather than at the
	 * end of the current one.
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	trigger_value = ctx->commit_count + offset;
	spin_unlock_irqrestore(&ctx->lock, flags);

	fd = _sde_fence_create_fd(ctx, trigger_value);
	*val = fd;
	/* NOTE(review): commit_count re-read without the lock below; the
	 * value is only used for this debug print
	 */
	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
		fd, trigger_value, ctx->commit_count, offset);
	SDE_EVT32(ctx->drm_id, trigger_value, fd);

	rc = (fd >= 0) ? 0 : fd;
	return rc;
}
/*
 * sde_fence_signal - advance the timeline and signal any due fences
 * @ctx: fence context
 * @ts: timestamp recorded on signaled fences
 * @fence_event: normal signal, error signal, or timeline reset
 *
 * Timeline invariant: done_count trails commit_count; the signed
 * subtraction keeps the "behind" test correct across wraparound.
 */
void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* fast-forward done_count to commit_count if it is behind;
		 * otherwise there is nothing to reset
		 */
		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! done count:%d commit:%d\n",
				ctx->done_count, ctx->commit_count);
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		/* normal case: one signal per outstanding commit */
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		/* more signals than commits: log fatal event and bail */
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	/* signal outside ctx->lock; trigger re-acquires it per fence */
	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}
  373. void sde_fence_timeline_status(struct sde_fence_context *ctx,
  374. struct drm_mode_object *drm_obj)
  375. {
  376. char *obj_name;
  377. if (!ctx || !drm_obj) {
  378. SDE_ERROR("invalid input params\n");
  379. return;
  380. }
  381. switch (drm_obj->type) {
  382. case DRM_MODE_OBJECT_CRTC:
  383. obj_name = "crtc";
  384. break;
  385. case DRM_MODE_OBJECT_CONNECTOR:
  386. obj_name = "connector";
  387. break;
  388. default:
  389. obj_name = "unknown";
  390. break;
  391. }
  392. SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  393. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  394. ctx->commit_count);
  395. }
  396. void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
  397. {
  398. char timeline_str[TIMELINE_VAL_LENGTH];
  399. if (fence->ops->timeline_value_str)
  400. fence->ops->timeline_value_str(fence,
  401. timeline_str, TIMELINE_VAL_LENGTH);
  402. seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
  403. fence->ops->get_driver_name(fence),
  404. fence->ops->get_timeline_name(fence),
  405. fence->seqno, timeline_str,
  406. fence->ops->signaled ?
  407. fence->ops->signaled(fence) : 0xffffffff);
  408. }
  409. void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
  410. struct drm_mode_object *drm_obj, struct seq_file **s)
  411. {
  412. char *obj_name;
  413. struct sde_fence *fc, *next;
  414. struct dma_fence *fence;
  415. if (!ctx || !drm_obj) {
  416. SDE_ERROR("invalid input params\n");
  417. return;
  418. }
  419. switch (drm_obj->type) {
  420. case DRM_MODE_OBJECT_CRTC:
  421. obj_name = "crtc";
  422. break;
  423. case DRM_MODE_OBJECT_CONNECTOR:
  424. obj_name = "connector";
  425. break;
  426. default:
  427. obj_name = "unknown";
  428. break;
  429. }
  430. seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  431. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  432. ctx->commit_count);
  433. spin_lock(&ctx->list_lock);
  434. list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
  435. fence = &fc->base;
  436. sde_fence_list_dump(fence, s);
  437. }
  438. spin_unlock(&ctx->list_lock);
  439. }