sde_fence.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include <linux/sync_file.h>
  8. #include <linux/dma-fence.h>
  9. #include <linux/dma-fence-array.h>
  10. #include <linux/file.h>
  11. #include "msm_drv.h"
  12. #include "sde_kms.h"
  13. #include "sde_fence.h"
  14. #define TIMELINE_VAL_LENGTH 128
  15. #define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10
  16. #define SPEC_FENCE_FLAG_ARRAY_BIND 0x11
/**
 * struct sde_fence - release/retire fence structure
 * @base: base dma-fence structure
 * @ctx: fence context (timeline) this fence belongs to
 * @name: name of each fence - it is fence timeline + commit_count
 * @fence_list: list node used to attach this fence to the timeline/context list
 * @fd: fd attached to this fence - debugging purpose
 * @hwfence_out_ctl: hw ctl for the output fence
 * @hwfence_index: hw fence index for this fence
 * @txq_updated_fence: flag to indicate that a fence has been updated in txq
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
	struct sde_hw_ctl *hwfence_out_ctl;
	u64 hwfence_index;
	bool txq_updated_fence;
};
/**
 * enum sde_hw_fence_clients - sde clients for the hw-fence feature
 *
 * Do not modify the order of this enum and/or add more elements
 * without modify/add fields in the 'hw_fence_data' structs.
 */
enum sde_hw_fence_clients {
	SDE_HW_FENCE_CLIENT_CTL_0,
	SDE_HW_FENCE_CLIENT_CTL_1,
	SDE_HW_FENCE_CLIENT_CTL_2,
	SDE_HW_FENCE_CLIENT_CTL_3,
	SDE_HW_FENCE_CLIENT_CTL_4,
	SDE_HW_FENCE_CLIENT_CTL_5,
	SDE_HW_FENCE_CLIENT_MAX,
};
/**
 * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals for each display
 * client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * the output signal must match with the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_no_dpu' table must be used for HW that does not support dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 14, {2, 3}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 15, {4, 5}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 16, {6, 7}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 17, {8, 9}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 18, {10, 11}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 19, {12, 13}, 0, 8, 8,
		0, 0}
};
/**
 * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output signals for each display
 * client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * the output signal must match with the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_dpu_client' table must be used for HW that supports dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 0, {0, 6}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 1, {1, 7}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 2, {2, 8}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 3, {3, 9}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 4, {4, 10}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 5, {5, 11}, 0, 8, 25,
		0, 0}
};
  95. int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc)
  96. {
  97. struct sde_hw_fence_data *sde_hw_fence_data;
  98. struct sde_hw_fence_data *hwfence_data;
  99. int ctl_id;
  100. if (!hw_ctl)
  101. return -EINVAL;
  102. ctl_id = hw_ctl->idx - CTL_0;
  103. if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
  104. SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
  105. return -EINVAL;
  106. }
  107. hwfence_data = &hw_ctl->hwfence_data;
  108. sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;
  109. if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
  110. SDE_ERROR("Unexpected client_id:%d for ctl_id:%d\n",
  111. sde_hw_fence_data[ctl_id].client_id, ctl_id);
  112. return -EINVAL;
  113. }
  114. /* init the default fence-data for this client */
  115. memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));
  116. SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
  117. hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
  118. &hwfence_data->mem_descriptor);
  119. hwfence_data->dma_context = dma_fence_context_alloc(1);
  120. if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
  121. hwfence_data->hw_fence_handle = NULL;
  122. SDE_DEBUG("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
  123. hwfence_data->hw_fence_client_id);
  124. return -EINVAL;
  125. }
  126. SDE_DEBUG("hwfence registered ctl_id:%d hw_fence_client_id:%d handle:0x%p\n",
  127. ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle);
  128. return 0;
  129. }
  130. void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
  131. {
  132. struct sde_hw_fence_data *hwfence_data;
  133. if (!hw_ctl)
  134. return;
  135. hwfence_data = &hw_ctl->hwfence_data;
  136. /* client was not registered */
  137. if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
  138. return;
  139. SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
  140. hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);
  141. msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
  142. hwfence_data->hw_fence_handle = NULL;
  143. }
/*
 * sde_fence_create_hw_fence - create a backing hw-fence for an output fence
 * @hw_ctl: hw ctl that will signal this fence
 * @sde_fence: sw fence to back with a hw-fence
 *
 * On success the hw ctl and hw-fence index are stored in @sde_fence so the
 * txq-update and release paths can find the hw-fence later.
 * Return: 0 on success, negative errno otherwise.
 */
static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
{
	struct sde_hw_fence_data *data;
	struct msm_hw_fence_create_params params;
	int ctl_id;
	u64 hwfence_index;
	int ret;

	if (!hw_ctl)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	params.fence = &sde_fence->base;
	params.handle = &hwfence_index;

	/* Create the HW fence */
	ret = msm_hw_fence_create(data->hw_fence_handle, &params);
	if (ret) {
		SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
			sde_fence->base.context, sde_fence->base.seqno);
	} else {
		/* store ctl and index for this fence */
		sde_fence->hwfence_out_ctl = hw_ctl;
		sde_fence->hwfence_index = hwfence_index;

		SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
			sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
			sde_fence->base.seqno, sde_fence->name);
	}

	return ret;
}
  176. static inline char *_get_client_id_name(int hw_fence_client_id)
  177. {
  178. switch (hw_fence_client_id) {
  179. case HW_FENCE_CLIENT_ID_CTX0:
  180. return "HW_FENCE_CLIENT_ID_CTX0";
  181. case HW_FENCE_CLIENT_ID_CTL0:
  182. return "HW_FENCE_CLIENT_ID_CTL0";
  183. case HW_FENCE_CLIENT_ID_CTL1:
  184. return "HW_FENCE_CLIENT_ID_CTL1";
  185. case HW_FENCE_CLIENT_ID_CTL2:
  186. return "HW_FENCE_CLIENT_ID_CTL2";
  187. case HW_FENCE_CLIENT_ID_CTL3:
  188. return "HW_FENCE_CLIENT_ID_CTL3";
  189. case HW_FENCE_CLIENT_ID_CTL4:
  190. return "HW_FENCE_CLIENT_ID_CTL4";
  191. case HW_FENCE_CLIENT_ID_CTL5:
  192. return "HW_FENCE_CLIENT_ID_CTL15";
  193. default:
  194. return "Unknown";
  195. }
  196. return "unknown";
  197. }
  198. static void _cleanup_fences_refcount(struct dma_fence **fences, u32 num_fences)
  199. {
  200. int i;
  201. for (i = 0; i < num_fences; i++)
  202. dma_fence_put(fences[i]);
  203. }
  204. int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
  205. u32 num_fences)
  206. {
  207. struct sde_hw_fence_data *data;
  208. int i, j, ret;
  209. int ctl_id;
  210. struct dma_fence_array *temp_array = NULL;
  211. struct dma_fence *base_fence;
  212. struct dma_fence **hw_fences;
  213. u32 num_hw_fences;
  214. struct dma_fence **fence_list;
  215. struct dma_fence_array *array = NULL;
  216. int array_childs = 0;
  217. int array_count = 0;
  218. int fence_list_index = 0;
  219. if (!hw_ctl) {
  220. SDE_ERROR("wrong ctl\n");
  221. return -EINVAL;
  222. }
  223. ctl_id = hw_ctl->idx - CTL_0;
  224. data = &hw_ctl->hwfence_data;
  225. if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
  226. SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
  227. return -EINVAL;
  228. }
  229. SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
  230. num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));
  231. for (i = 0; i < num_fences; i++) {
  232. /* get a refcount for each of the fences */
  233. dma_fence_get(fences[i]);
  234. if (dma_fence_is_array(fences[i])) {
  235. array_count++;
  236. array = container_of(fences[i], struct dma_fence_array, base);
  237. array_childs += array->num_fences;
  238. }
  239. SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
  240. (fences[i])->context, (fences[i])->seqno);
  241. }
  242. if (num_fences > 1) {
  243. /* fence_list memory is freed during fence-array release */
  244. fence_list = kzalloc(((num_fences - array_count) + array_childs)
  245. * (sizeof(struct dma_fence *)), GFP_KERNEL);
  246. if (!fence_list) {
  247. _cleanup_fences_refcount(fences, num_fences);
  248. return -EINVAL;
  249. }
  250. /* populate fence_list with the fences */
  251. for (i = 0; i < num_fences; i++) {
  252. if (dma_fence_is_array(fences[i])) {
  253. array = container_of(fences[i], struct dma_fence_array, base);
  254. for (j = 0; j < array->num_fences; j++) {
  255. /* get a refcount for each of the child fences */
  256. dma_fence_get(array->fences[j]);
  257. fence_list[fence_list_index++] = array->fences[j];
  258. }
  259. if (array->num_fences) /* print the first fence from array */
  260. SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
  261. SDE_EVTLOG_H32(array->fences[0]->context),
  262. SDE_EVTLOG_L32(array->fences[0]->context),
  263. SDE_EVTLOG_H32(array->fences[0]->seqno),
  264. SDE_EVTLOG_L32(array->fences[0]->seqno));
  265. else
  266. SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
  267. SDE_EVTLOG_ERROR);
  268. /* remove refcount on parent */
  269. dma_fence_put(fences[i]);
  270. } else {
  271. fence_list[fence_list_index++] = fences[i];
  272. SDE_EVT32(ctl_id, num_fences, i, SDE_EVTLOG_H32(fences[i]->context),
  273. SDE_EVTLOG_L32(fences[i]->context),
  274. SDE_EVTLOG_H32(fences[i]->seqno),
  275. SDE_EVTLOG_L32(fences[i]->seqno));
  276. }
  277. }
  278. temp_array = dma_fence_array_create(fence_list_index, fence_list,
  279. data->dma_context, data->hw_fence_array_seqno++, 0);
  280. if (!temp_array) {
  281. SDE_ERROR("unable to create fence array, cant register for wait\n");
  282. _cleanup_fences_refcount(fences, num_fences);
  283. kfree(fence_list);
  284. return -EINVAL;
  285. }
  286. base_fence = &temp_array->base;
  287. hw_fences = &base_fence;
  288. num_hw_fences = 1;
  289. } else {
  290. struct dma_fence_array *tmp_array;
  291. hw_fences = fences;
  292. num_hw_fences = num_fences;
  293. tmp_array = dma_fence_is_array(fences[0]) ?
  294. container_of(fences[0], struct dma_fence_array, base) :
  295. NULL;
  296. SDE_EVT32(ctl_id, num_hw_fences, SDE_EVTLOG_H32(fences[0]->context),
  297. SDE_EVTLOG_L32(fences[0]->context), SDE_EVTLOG_H32(fences[0]->seqno),
  298. SDE_EVTLOG_L32(fences[0]->seqno), fences[0]->flags,
  299. tmp_array ? tmp_array->num_fences : SDE_EVTLOG_FUNC_CASE2);
  300. }
  301. /* register for wait */
  302. ret = msm_hw_fence_wait_update(data->hw_fence_handle, hw_fences, num_hw_fences, true);
  303. if (ret)
  304. SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);
  305. /* fence-array put will release each individual extra refcount during array release */
  306. if (temp_array)
  307. dma_fence_put(&temp_array->base);
  308. else
  309. dma_fence_put(fences[0]);
  310. SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);
  311. return ret;
  312. }
  313. static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, u32 line_count, u32 debugfs_hw_fence)
  314. {
  315. struct sde_hw_fence_data *data;
  316. u32 ipcc_out_signal;
  317. int ctl_id;
  318. if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
  319. !hw_ctl->ops.hw_fence_update_output_fence) {
  320. SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
  321. return -EINVAL;
  322. }
  323. ctl_id = hw_ctl->idx - CTL_0;
  324. data = &hw_ctl->hwfence_data;
  325. if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
  326. /* This should not have happened!, review the ping pong calculation */
  327. SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
  328. MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
  329. return -EINVAL;
  330. }
  331. ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
  332. data->ipcc_out_signal_pp_idx = (++data->ipcc_out_signal_pp_idx %
  333. MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
  334. SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
  335. ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));
  336. if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
  337. hw_ctl->ops.hw_fence_output_timestamp_ctrl)
  338. hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);
  339. /* update client/signal output fence */
  340. hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
  341. SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);
  342. /* arm dpu to trigger output fence signal once ready */
  343. if (line_count)
  344. hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
  345. HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
  346. else
  347. hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CTRL_DONE);
  348. return 0;
  349. }
/*
 * _sde_fence_arm_output_hw_fence - validate pending hw-fences and arm the ctl
 * @ctx: fence timeline to scan
 * @line_count: trigger selection forwarded to _arm_output_hw_fence()
 * @debugfs_hw_fence: debugfs flags forwarded to _arm_output_hw_fence()
 *
 * Walks the timeline list under list_lock; every fence flagged as a hw-fence
 * must carry a valid hw ctl. The ctl of the last hw-fence found is armed once,
 * after the lock is dropped. Return: 0 on success, -EINVAL if any hw-fence
 * has no ctl.
 */
static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, u32 line_count,
		u32 debugfs_hw_fence)
{
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_fence *fc, *next;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not hw-fence, or already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flaged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available, this should not have happened, but if it does,
			 * this can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
				fence->context, fence->seqno);
			spin_unlock(&ctx->list_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
	if (hw_ctl)
		_arm_output_hw_fence(hw_ctl, line_count, debugfs_hw_fence);

	return 0;
}
/*
 * sde_fence_update_hw_fences_txq - push pending output hw-fences to the txq
 * @ctx: fence timeline to scan
 * @vid_mode: caller's display mode, used for debug output only
 * @line_count: when non-zero forces arming the output trigger
 * @debugfs_hw_fence: debugfs flags forwarded to the arming path
 *
 * Walks the timeline under list_lock and updates the hw-fence tx queue for
 * the first not-yet-updated hw-fence; only one txq update is done per frame.
 * After unlocking, arms the dpu output trigger when a txq update happened
 * (or unconditionally in line-count mode).
 * Return: 0 on success, negative errno on the first failure.
 */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
		u32 debugfs_hw_fence)
{
	int ret = 0;
	struct sde_hw_fence_data *data;
	struct sde_fence *fc, *next;
	struct sde_hw_ctl *hw_ctl = NULL;
	int ctl_id;
	bool txq_updated = false;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not hw-fence, or already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
				fc->txq_updated_fence)
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/* We flaged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available, this should not have happened, but if it does,
			 * this can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
				fence->seqno, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		ctl_id = hw_ctl->idx - CTL_0;
		data = &hw_ctl->hwfence_data;
		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
			SDE_ERROR("unexpected handle for ctl_id:%d, this can fence-timeout\n",
				ctl_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		/* update hw-fence tx queue */
		SDE_EVT32(ctl_id, SDE_EVTLOG_H32(fc->hwfence_index),
			SDE_EVTLOG_L32(fc->hwfence_index));
		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
		if (ret) {
			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
				fc->hwfence_index, fence->context, fence->seqno,
				data->hw_fence_client_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			goto exit;
		}

		/* avoid updating txq more than once and avoid repeating the same fence twice */
		txq_updated = fc->txq_updated_fence = true;

		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%llx ctl:%d vid:%d\n",
			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);

		/* We will update TxQ one time per frame */
		if (txq_updated)
			break;
	}

exit:
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
	if ((txq_updated && hw_ctl) || line_count)
		_sde_fence_arm_output_hw_fence(ctx, line_count, debugfs_hw_fence);

	return ret;
}
/*
 * _sde_hw_fence_release - destroy the hw-fence backing a sw fence
 * @f: sde fence whose hw-fence (created by sde_fence_create_hw_fence) must
 *     be destroyed; called from the dma-fence release path
 */
static void _sde_hw_fence_release(struct sde_fence *f)
{
	struct sde_hw_fence_data *data;
	struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
	int ctl_id;
	int ret;

	if (!hw_ctl) {
		SDE_ERROR("invalid hw_ctl\n");
		return;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return;
	}

	SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
		ctl_id, f->base.context, f->base.seqno, f->name);

	/* Delete the HW fence */
	ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
	if (ret)
		SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
			f->base.context, f->base.seqno);
}
  479. static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
  480. {
  481. struct sde_hw_fence_data *data;
  482. int ret = 0;
  483. data = &hw_ctl->hwfence_data;
  484. if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
  485. SDE_EVT32(data->hw_fence_client_id);
  486. ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
  487. if (ret) {
  488. pr_err("failed to reset client %d\n", data->hw_fence_client_id);
  489. return -EINVAL;
  490. }
  491. }
  492. return ret;
  493. }
/*
 * sde_fence_update_input_hw_fence_signal - program dpu for input hw-fences
 * @hw_ctl: ctl path to configure
 * @debugfs_hw_fence: debugfs flags; may enable input timestamping
 * @hw_mdp: mdp top block, used for the timestamp control op
 * @disable: when true, turn hw-fencing off for this ctl and return
 *
 * Programs the ipcc client/signal pair the dpu listens on for input fences
 * and enables hw-fencing on the ctl path.
 * Return: 0 on success, -EINVAL on missing ops, -EPERM when disabled.
 */
int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
		struct sde_hw_mdp *hw_mdp, bool disable)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_signal_id;
	u32 ipcc_client_id;
	int ctl_id;

	/* we must support sw_override as well, so check both functions */
	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
			!hw_ctl->ops.hw_fence_trigger_sw_override) {
		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;

	if (disable) {
		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
		return -EPERM;
	}

	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);

	ipcc_signal_id = data->ipcc_in_signal;
	ipcc_client_id = data->ipcc_in_client;

	SDE_DEBUG("configure input signal:%d out client:%d ctl_id:%d\n", ipcc_signal_id,
		ipcc_client_id, ctl_id);
	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id);

	/* configure dpu hw for the client/signal pair signaling input-fence */
	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);

	/* Enable hw-fence for this ctrl-path */
	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);

	return 0;
}
  527. void *sde_sync_get(uint64_t fd)
  528. {
  529. /* force signed compare, fdget accepts an int argument */
  530. return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
  531. }
  532. void sde_sync_put(void *fence)
  533. {
  534. if (fence)
  535. dma_fence_put(fence);
  536. }
  537. void sde_fence_dump(struct dma_fence *fence)
  538. {
  539. char timeline_str[TIMELINE_VAL_LENGTH];
  540. if (fence->ops->timeline_value_str)
  541. fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);
  542. SDE_ERROR(
  543. "fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
  544. fence->ops->get_driver_name(fence),
  545. fence->ops->get_timeline_name(fence),
  546. fence->seqno, timeline_str,
  547. fence->ops->signaled ?
  548. fence->ops->signaled(fence) : 0xffffffff,
  549. dma_fence_get_status(fence), fence->flags);
  550. }
  551. static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
  552. {
  553. struct dma_fence_array *array;
  554. struct dma_fence *user_fence;
  555. int i;
  556. array = container_of(base_fence, struct dma_fence_array, base);
  557. if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
  558. test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
  559. for (i = 0; i < array->num_fences; i++) {
  560. user_fence = array->fences[i];
  561. if (user_fence) {
  562. dma_fence_get(user_fence);
  563. sde_fence_dump(user_fence);
  564. dma_fence_put(user_fence);
  565. }
  566. }
  567. }
  568. }
/*
 * sde_sync_wait - wait on a dma-fence with debug dumps on failure
 * @fnc: dma-fence to wait on (may be NULL)
 * @timeout_ms: timeout in milliseconds
 *
 * Return: remaining jiffies on success, 0 on timeout, or a negative error
 * (-EINVAL for a NULL fence, -EBADF for a spec-fence bind failure).
 */
signed long sde_sync_wait(void *fnc, long timeout_ms)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
				msecs_to_jiffies(timeout_ms));
	/* on timeout/error, dump state to aid fence-timeout debug */
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			if (status == -EINVAL) {
				/* spec fence failed to bind: report distinct error */
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	return rc;
}
  597. uint32_t sde_sync_get_name_prefix(void *fence)
  598. {
  599. const char *name;
  600. uint32_t i, prefix;
  601. struct dma_fence *f = fence;
  602. if (!fence)
  603. return 0;
  604. name = f->ops->get_driver_name(f);
  605. if (!name)
  606. return 0;
  607. prefix = 0x0;
  608. for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
  609. prefix = (prefix << CHAR_BIT) | name[i];
  610. return prefix;
  611. }
  612. static void sde_fence_destroy(struct kref *kref)
  613. {
  614. struct sde_fence_context *ctx;
  615. if (!kref) {
  616. SDE_ERROR("received invalid kref\n");
  617. return;
  618. }
  619. ctx = container_of(kref, struct sde_fence_context, kref);
  620. kfree(ctx);
  621. }
/* return the sde_fence wrapper embedding the given dma-fence */
static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}
  626. static const char *sde_fence_get_driver_name(struct dma_fence *fence)
  627. {
  628. struct sde_fence *f = to_sde_fence(fence);
  629. return f->name;
  630. }
  631. static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
  632. {
  633. struct sde_fence *f = to_sde_fence(fence);
  634. return f->ctx->name;
  635. }
/* dma_fence_ops: signaling is driven by done_count updates; nothing to arm */
static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
/* dma_fence_ops: a fence is signaled once the timeline caught up to it */
static bool sde_fence_signaled(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);
	bool status;

	/* signed difference so the comparison survives counter wrap-around */
	status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
	SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
		status, fence->seqno, f->ctx->done_count);
	return status;
}
  649. static void sde_fence_release(struct dma_fence *fence)
  650. {
  651. struct sde_fence *f;
  652. if (fence) {
  653. f = to_sde_fence(fence);
  654. /* Delete the HW fence */
  655. if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
  656. _sde_hw_fence_release(f);
  657. kref_put(&f->ctx->kref, sde_fence_destroy);
  658. kfree(f);
  659. }
  660. }
  661. static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
  662. {
  663. if (!fence || !str)
  664. return;
  665. snprintf(str, size, "%llu", fence->seqno);
  666. }
  667. static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
  668. int size)
  669. {
  670. struct sde_fence *f = to_sde_fence(fence);
  671. if (!fence || !f->ctx || !str)
  672. return;
  673. snprintf(str, size, "%d", f->ctx->done_count);
  674. }
  675. static struct dma_fence_ops sde_fence_ops = {
  676. .get_driver_name = sde_fence_get_driver_name,
  677. .get_timeline_name = sde_fence_get_timeline_name,
  678. .enable_signaling = sde_fence_enable_signaling,
  679. .signaled = sde_fence_signaled,
  680. .wait = dma_fence_default_wait,
  681. .release = sde_fence_release,
  682. .fence_value_str = sde_fence_value_str,
  683. .timeline_value_str = sde_fence_timeline_value_str,
  684. };
/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: timeline (struct sde_fence_context) to associate with fence
 * @val: timeline value at which to signal the fence
 * @hw_ctl: when non-NULL, also create a backing hw-fence on this ctl
 * Return: file descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	/* hold the timeline while this fence is alive; dropped on release */
	kref_get(&ctx->kref);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		/* dropping the only ref frees the fence via sde_fence_release */
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* If ctl_id is valid, try to create a hw-fence */
	if (hw_ctl)
		sde_fence_create_hw_fence(hw_ctl, sde_fence);

	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}
  739. struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
  740. {
  741. struct sde_fence_context *ctx;
  742. if (!name) {
  743. SDE_ERROR("invalid argument(s)\n");
  744. return ERR_PTR(-EINVAL);
  745. }
  746. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  747. if (!ctx) {
  748. SDE_ERROR("failed to alloc fence ctx\n");
  749. return ERR_PTR(-ENOMEM);
  750. }
  751. strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
  752. ctx->drm_id = drm_id;
  753. kref_init(&ctx->kref);
  754. ctx->context = dma_fence_context_alloc(1);
  755. spin_lock_init(&ctx->lock);
  756. spin_lock_init(&ctx->list_lock);
  757. INIT_LIST_HEAD(&ctx->fence_list_head);
  758. return ctx;
  759. }
  760. void sde_fence_deinit(struct sde_fence_context *ctx)
  761. {
  762. if (!ctx) {
  763. SDE_ERROR("invalid fence\n");
  764. return;
  765. }
  766. kref_put(&ctx->kref, sde_fence_destroy);
  767. }
  768. void sde_fence_prepare(struct sde_fence_context *ctx)
  769. {
  770. unsigned long flags;
  771. if (!ctx) {
  772. SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
  773. } else {
  774. spin_lock_irqsave(&ctx->lock, flags);
  775. ++ctx->commit_count;
  776. spin_unlock_irqrestore(&ctx->lock, flags);
  777. }
  778. }
/*
 * _sde_fence_trigger - signal and retire fences covered by the timeline
 * @ctx: fence timeline context to scan
 * @error: when true, set -EBUSY on each fence before testing/signaling it
 * @ts: timestamp recorded on the fences as they are signaled
 */
static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;

	/* pin the context so the kref_put below is the only possible
	 * destruction point while the list is being walked
	 */
	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	/* _safe variant: signaled entries are unlinked during the walk */
	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		/* ctx->lock is the dma_fence lock (see dma_fence_init use);
		 * it nests inside list_lock here
		 */
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);
		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			list_del_init(&fc->fence_list);
			/* drop the pending-list's fence reference */
			dma_fence_put(&fc->base);
		}
	}
end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}
/*
 * sde_fence_create - create a fence fd at an offset from the current commit
 * @ctx: fence timeline context
 * @val: out parameter; receives the new fd (cast to uint64_t)
 * @offset: distance ahead of commit_count at which the fence will signal
 * @hw_ctl: passed through so a backing hw-fence may also be created
 * Return: 0 on success, negative error code otherwise
 */
int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
		uint32_t offset, struct sde_hw_ctl *hw_ctl)
{
	uint32_t trigger_value;
	int fd, rc = -EINVAL;
	unsigned long flags;

	if (!ctx || !val) {
		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
			ctx != NULL, val != NULL);
		return rc;
	}

	/*
	 * Allow created fences to have a constant offset with respect
	 * to the timeline. This allows us to delay the fence signalling
	 * w.r.t. the commit completion (e.g., an offset of +1 would
	 * cause fences returned during a particular commit to signal
	 * after an additional delay of one commit, rather than at the
	 * end of the current one.
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	trigger_value = ctx->commit_count + offset;
	spin_unlock_irqrestore(&ctx->lock, flags);

	fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
	*val = fd;

	/* NOTE(review): commit_count is re-read here without ctx->lock;
	 * appears to be debug-output only, so the race looks benign
	 */
	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
		fd, trigger_value, ctx->commit_count, offset);
	SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);

	rc = (fd >= 0) ? 0 : fd;
	return rc;
}
/*
 * sde_fence_signal - advance the timeline and signal any fences now due
 * @ctx: fence timeline context
 * @ts: timestamp propagated to the signaled fences
 * @fence_event: SDE_FENCE_RESET_TIMELINE fast-forwards done_count to
 *               commit_count; SDE_FENCE_SIGNAL_ERROR signals the fences
 *               with an error; otherwise a normal single-step signal
 * @hw_ctl: when non-NULL on a timeline reset, the hw-fence timeline is
 *          reset as well (without error, without destroy)
 */
void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* reset hw-fences without error */
		if (hw_ctl)
			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
		/* signed view of the u32 difference tolerates wrap-around */
		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
				ctx->drm_id, ctx->done_count, ctx->commit_count);
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			/* timeline already caught up; nothing to trigger */
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		/* normal case: retire one commit */
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		/* more signals than commits: log fatal event and bail
		 * without advancing the timeline
		 */
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	/* signal every pending fence covered by the advanced done_count */
	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}
  880. void sde_fence_timeline_status(struct sde_fence_context *ctx,
  881. struct drm_mode_object *drm_obj)
  882. {
  883. char *obj_name;
  884. if (!ctx || !drm_obj) {
  885. SDE_ERROR("invalid input params\n");
  886. return;
  887. }
  888. switch (drm_obj->type) {
  889. case DRM_MODE_OBJECT_CRTC:
  890. obj_name = "crtc";
  891. break;
  892. case DRM_MODE_OBJECT_CONNECTOR:
  893. obj_name = "connector";
  894. break;
  895. default:
  896. obj_name = "unknown";
  897. break;
  898. }
  899. SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  900. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  901. ctx->commit_count);
  902. }
  903. void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
  904. {
  905. char timeline_str[TIMELINE_VAL_LENGTH];
  906. if (fence->ops->timeline_value_str)
  907. fence->ops->timeline_value_str(fence,
  908. timeline_str, TIMELINE_VAL_LENGTH);
  909. seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
  910. fence->ops->get_driver_name(fence),
  911. fence->ops->get_timeline_name(fence),
  912. fence->seqno, timeline_str,
  913. fence->ops->signaled ?
  914. fence->ops->signaled(fence) : 0xffffffff);
  915. }
  916. void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
  917. struct drm_mode_object *drm_obj, struct seq_file **s)
  918. {
  919. char *obj_name;
  920. struct sde_fence *fc, *next;
  921. struct dma_fence *fence;
  922. if (!ctx || !drm_obj) {
  923. SDE_ERROR("invalid input params\n");
  924. return;
  925. }
  926. switch (drm_obj->type) {
  927. case DRM_MODE_OBJECT_CRTC:
  928. obj_name = "crtc";
  929. break;
  930. case DRM_MODE_OBJECT_CONNECTOR:
  931. obj_name = "connector";
  932. break;
  933. default:
  934. obj_name = "unknown";
  935. break;
  936. }
  937. seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  938. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  939. ctx->commit_count);
  940. spin_lock(&ctx->list_lock);
  941. list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
  942. fence = &fc->base;
  943. sde_fence_list_dump(fence, s);
  944. }
  945. spin_unlock(&ctx->list_lock);
  946. }