sde_fence.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  7. #include <linux/sync_file.h>
  8. #include <linux/dma-fence.h>
  9. #include <linux/dma-fence-array.h>
  10. #include <linux/file.h>
  11. #include "msm_drv.h"
  12. #include "sde_kms.h"
  13. #include "sde_fence.h"
  14. #define TIMELINE_VAL_LENGTH 128
  15. #define SPEC_FENCE_FLAG_FENCE_ARRAY 0x10
  16. #define SPEC_FENCE_FLAG_ARRAY_BIND 0x11
/**
 * struct sde_fence - release/retire fence structure
 * @base: base dma-fence structure
 * @ctx: fence context (timeline) this fence belongs to
 * @name: name of each fence - it is fence timeline + commit_count
 * @fence_list: list entry used to link this fence on its timeline/context
 * @fd: fd attached to this fence - debugging purpose only
 * @hwfence_out_ctl: hw ctl for the output fence (NULL if no hw-fence backs it)
 * @hwfence_index: hw fence index for this fence
 * @txq_updated_fence: flag to indicate that a fence has been updated in txq
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
	struct sde_hw_ctl *hwfence_out_ctl;
	u64 hwfence_index;
	bool txq_updated_fence;
};
/**
 * enum sde_hw_fence_clients - sde clients for the hw-fence feature
 *
 * Do not modify the order of this enum and/or add more elements
 * without modifying/adding the corresponding entries in the
 * 'hw_fence_data' tables below (they are indexed by this enum).
 */
enum sde_hw_fence_clients {
	SDE_HW_FENCE_CLIENT_CTL_0,
	SDE_HW_FENCE_CLIENT_CTL_1,
	SDE_HW_FENCE_CLIENT_CTL_2,
	SDE_HW_FENCE_CLIENT_CTL_3,
	SDE_HW_FENCE_CLIENT_CTL_4,
	SDE_HW_FENCE_CLIENT_CTL_5,
	SDE_HW_FENCE_CLIENT_MAX,
};
/**
 * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals for each display
 * client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * the output signal must match with the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_no_dpu' table must be used for HW that does not support dpu-signal.
 * (The original comment named this table 'hw_fence_data_dpu_client' by mistake.)
 */
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 14, {2, 3}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 15, {4, 5}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 16, {6, 7}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 17, {8, 9}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 18, {10, 11}, 0, 8, 8,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 19, {12, 13}, 0, 8, 8,
		0, 0}
};
/**
 * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output signals for each
 * display client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * the output signal must match with the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_dpu_client' table must be used for HW that supports dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, 8, 0, {0, 6}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, 8, 1, {1, 7}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, 8, 2, {2, 8}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, 8, 3, {3, 9}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, 8, 4, {4, 10}, 0, 8, 25,
		0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, 8, 5, {5, 11}, 0, 8, 25,
		0, 0}
};
  95. int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc)
  96. {
  97. struct sde_hw_fence_data *sde_hw_fence_data;
  98. struct sde_hw_fence_data *hwfence_data;
  99. int ctl_id;
  100. if (!hw_ctl)
  101. return -EINVAL;
  102. ctl_id = hw_ctl->idx - CTL_0;
  103. if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
  104. SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
  105. return -EINVAL;
  106. }
  107. hwfence_data = &hw_ctl->hwfence_data;
  108. sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;
  109. if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
  110. SDE_ERROR("Unexpected client_id:%d for ctl_id:%d\n",
  111. sde_hw_fence_data[ctl_id].client_id, ctl_id);
  112. return -EINVAL;
  113. }
  114. /* init the default fence-data for this client */
  115. memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));
  116. SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
  117. hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
  118. &hwfence_data->mem_descriptor);
  119. hwfence_data->dma_context = dma_fence_context_alloc(1);
  120. if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
  121. hwfence_data->hw_fence_handle = NULL;
  122. SDE_ERROR("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
  123. hwfence_data->hw_fence_client_id);
  124. return -EINVAL;
  125. }
  126. SDE_DEBUG("hwfence registered ctl_id:%d hw_fence_client_id:%d handle:0x%p\n",
  127. ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle);
  128. return 0;
  129. }
  130. void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
  131. {
  132. struct sde_hw_fence_data *hwfence_data;
  133. if (!hw_ctl)
  134. return;
  135. hwfence_data = &hw_ctl->hwfence_data;
  136. /* client was not registered */
  137. if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
  138. return;
  139. SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
  140. hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);
  141. msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
  142. hwfence_data->hw_fence_handle = NULL;
  143. }
  144. static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
  145. {
  146. struct sde_hw_fence_data *data;
  147. struct msm_hw_fence_create_params params;
  148. int ctl_id;
  149. u64 hwfence_index;
  150. int ret;
  151. if (!hw_ctl)
  152. return -EINVAL;
  153. ctl_id = hw_ctl->idx - CTL_0;
  154. data = &hw_ctl->hwfence_data;
  155. if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
  156. SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
  157. return -EINVAL;
  158. }
  159. params.fence = &sde_fence->base;
  160. params.handle = &hwfence_index;
  161. /* Create the HW fence */
  162. ret = msm_hw_fence_create(data->hw_fence_handle, &params);
  163. if (ret) {
  164. SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
  165. sde_fence->base.context, sde_fence->base.seqno);
  166. } else {
  167. /* store ctl and index for this fence */
  168. sde_fence->hwfence_out_ctl = hw_ctl;
  169. sde_fence->hwfence_index = hwfence_index;
  170. SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
  171. sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
  172. sde_fence->base.seqno, sde_fence->name);
  173. }
  174. return ret;
  175. }
  176. static inline char *_get_client_id_name(int hw_fence_client_id)
  177. {
  178. switch (hw_fence_client_id) {
  179. case HW_FENCE_CLIENT_ID_CTX0:
  180. return "HW_FENCE_CLIENT_ID_CTX0";
  181. case HW_FENCE_CLIENT_ID_CTL0:
  182. return "HW_FENCE_CLIENT_ID_CTL0";
  183. case HW_FENCE_CLIENT_ID_CTL1:
  184. return "HW_FENCE_CLIENT_ID_CTL1";
  185. case HW_FENCE_CLIENT_ID_CTL2:
  186. return "HW_FENCE_CLIENT_ID_CTL2";
  187. case HW_FENCE_CLIENT_ID_CTL3:
  188. return "HW_FENCE_CLIENT_ID_CTL3";
  189. case HW_FENCE_CLIENT_ID_CTL4:
  190. return "HW_FENCE_CLIENT_ID_CTL4";
  191. case HW_FENCE_CLIENT_ID_CTL5:
  192. return "HW_FENCE_CLIENT_ID_CTL15";
  193. default:
  194. return "Unknown";
  195. }
  196. return "unknown";
  197. }
  198. static void _cleanup_fences_refcount(struct dma_fence **fences, u32 num_fences)
  199. {
  200. int i;
  201. for (i = 0; i < num_fences; i++)
  202. dma_fence_put(fences[i]);
  203. }
  204. int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
  205. u32 num_fences)
  206. {
  207. struct sde_hw_fence_data *data;
  208. int i, j, ret;
  209. int ctl_id;
  210. struct dma_fence_array *temp_array = NULL;
  211. struct dma_fence *base_fence;
  212. struct dma_fence **hw_fences;
  213. u32 num_hw_fences;
  214. struct dma_fence **fence_list;
  215. struct dma_fence_array *array = NULL;
  216. int array_childs = 0;
  217. int array_count = 0;
  218. int fence_list_index = 0;
  219. if (!hw_ctl) {
  220. SDE_ERROR("wrong ctl\n");
  221. return -EINVAL;
  222. }
  223. ctl_id = hw_ctl->idx - CTL_0;
  224. data = &hw_ctl->hwfence_data;
  225. if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
  226. SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
  227. return -EINVAL;
  228. }
  229. SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
  230. num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));
  231. for (i = 0; i < num_fences; i++) {
  232. /* get a refcount for each of the fences */
  233. dma_fence_get(fences[i]);
  234. if (dma_fence_is_array(fences[i])) {
  235. array_count++;
  236. array = container_of(fences[i], struct dma_fence_array, base);
  237. array_childs += array->num_fences;
  238. }
  239. SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
  240. (fences[i])->context, (fences[i])->seqno);
  241. }
  242. if (num_fences > 1) {
  243. /* fence_list memory is freed during fence-array release */
  244. fence_list = kzalloc(((num_fences - array_count) + array_childs)
  245. * (sizeof(struct dma_fence *)), GFP_KERNEL);
  246. if (!fence_list) {
  247. _cleanup_fences_refcount(fences, num_fences);
  248. return -EINVAL;
  249. }
  250. /* populate fence_list with the fences */
  251. for (i = 0; i < num_fences; i++) {
  252. if (dma_fence_is_array(fences[i])) {
  253. array = container_of(fences[i], struct dma_fence_array, base);
  254. for (j = 0; j < array->num_fences; j++) {
  255. /* get a refcount for each of the child fences */
  256. dma_fence_get(array->fences[j]);
  257. fence_list[fence_list_index++] = array->fences[j];
  258. }
  259. /* remove refcount on parent */
  260. dma_fence_put(fences[i]);
  261. } else {
  262. fence_list[fence_list_index++] = fences[i];
  263. }
  264. }
  265. temp_array = dma_fence_array_create(fence_list_index, fence_list,
  266. data->dma_context, data->hw_fence_array_seqno++, 0);
  267. if (!temp_array) {
  268. SDE_ERROR("unable to create fence array, cant register for wait\n");
  269. _cleanup_fences_refcount(fences, num_fences);
  270. kfree(fence_list);
  271. return -EINVAL;
  272. }
  273. base_fence = &temp_array->base;
  274. hw_fences = &base_fence;
  275. num_hw_fences = 1;
  276. } else {
  277. hw_fences = fences;
  278. num_hw_fences = num_fences;
  279. }
  280. /* register for wait */
  281. ret = msm_hw_fence_wait_update(data->hw_fence_handle, hw_fences, num_hw_fences, true);
  282. if (ret)
  283. SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);
  284. /* fence-array put will release each individual extra refcount during array release */
  285. if (temp_array)
  286. dma_fence_put(&temp_array->base);
  287. else
  288. dma_fence_put(fences[0]);
  289. SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);
  290. return ret;
  291. }
  292. static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, u32 line_count, u32 debugfs_hw_fence)
  293. {
  294. struct sde_hw_fence_data *data;
  295. u32 ipcc_out_signal;
  296. int ctl_id;
  297. if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
  298. !hw_ctl->ops.hw_fence_update_output_fence) {
  299. SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
  300. return -EINVAL;
  301. }
  302. ctl_id = hw_ctl->idx - CTL_0;
  303. data = &hw_ctl->hwfence_data;
  304. if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
  305. /* This should not have happened!, review the ping pong calculation */
  306. SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
  307. MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
  308. return -EINVAL;
  309. }
  310. ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
  311. data->ipcc_out_signal_pp_idx = (++data->ipcc_out_signal_pp_idx %
  312. MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
  313. SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
  314. ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));
  315. if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
  316. hw_ctl->ops.hw_fence_output_timestamp_ctrl)
  317. hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);
  318. /* update client/signal output fence */
  319. hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
  320. SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);
  321. /* arm dpu to trigger output fence signal once ready */
  322. if (line_count)
  323. hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
  324. HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
  325. else
  326. hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CTRL_DONE);
  327. return 0;
  328. }
/*
 * _sde_fence_arm_output_hw_fence - validate pending hw-fences and arm the dpu
 * @ctx: fence timeline whose pending fences are scanned
 * @line_count: forwarded to _arm_output_hw_fence (trigger selection)
 * @debugfs_hw_fence: forwarded to _arm_output_hw_fence (debug features)
 *
 * Walks the timeline under list_lock looking for dma-fences flagged as
 * hw-fences; every flagged fence must carry a valid hw ctl. The ctl that is
 * finally armed is the one from the LAST flagged fence seen in the list.
 *
 * Return: 0 on success (including "nothing to arm"), -EINVAL if a flagged
 * fence has no hw ctl.
 */
static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, u32 line_count,
	u32 debugfs_hw_fence)
{
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_fence *fc, *next;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not hw-fence, or already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flaged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available, this should not have happened, but if it does,
			 * this can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
				fence->context, fence->seqno);
			spin_unlock(&ctx->list_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
	if (hw_ctl)
		_arm_output_hw_fence(hw_ctl, line_count, debugfs_hw_fence);

	return 0;
}
/* update output hw_fences txq */
/**
 * sde_fence_update_hw_fences_txq - update the hw-fence tx queue for pending
 * output fences on this timeline
 * @ctx: fence timeline holding the output fences
 * @vid_mode: video-mode flag (used for logging in this function)
 * @line_count: current line count; non-zero also forces arming the trigger
 * @debugfs_hw_fence: debugfs mask controlling hw-fence debug features
 *
 * Walks the timeline under list_lock and updates the hw-fence txq for the
 * first pending (not-yet-updated) hw-fence found; the txq is deliberately
 * updated at most once per frame. Afterwards, optionally arms the dpu
 * output-fence trigger.
 *
 * Return: 0 on success, negative errno on the first failure encountered.
 */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
	u32 debugfs_hw_fence)
{
	int ret = 0;
	struct sde_hw_fence_data *data;
	struct sde_fence *fc, *next;
	struct sde_hw_ctl *hw_ctl = NULL;
	int ctl_id;
	bool txq_updated = false;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not hw-fence, or already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
				fc->txq_updated_fence)
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/* We flaged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available, this should not have happened, but if it does,
			 * this can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
				fence->seqno, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		ctl_id = hw_ctl->idx - CTL_0;
		data = &hw_ctl->hwfence_data;
		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
			SDE_ERROR("unexpected handle for ctl_id:%d, this can fence-timeout\n",
				ctl_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		/* update hw-fence tx queue */
		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
		if (ret) {
			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
				fc->hwfence_index, fence->context, fence->seqno,
				data->hw_fence_client_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			goto exit;
		}

		/* avoid updating txq more than once and avoid repeating the same fence twice */
		txq_updated = fc->txq_updated_fence = true;

		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%llx ctl:%d vid:%d\n",
			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);

		/* We will update TxQ one time per frame */
		/* txq_updated is always true here, so the loop exits after the first update */
		if (txq_updated)
			break;
	}

exit:
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
	if ((txq_updated && hw_ctl) || line_count)
		_sde_fence_arm_output_hw_fence(ctx, line_count, debugfs_hw_fence);

	return ret;
}
  432. static void _sde_hw_fence_release(struct sde_fence *f)
  433. {
  434. struct sde_hw_fence_data *data;
  435. struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
  436. int ctl_id;
  437. int ret;
  438. if (!hw_ctl) {
  439. SDE_ERROR("invalid hw_ctl\n");
  440. return;
  441. }
  442. ctl_id = hw_ctl->idx - CTL_0;
  443. data = &hw_ctl->hwfence_data;
  444. if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
  445. SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
  446. return;
  447. }
  448. SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
  449. ctl_id, f->base.context, f->base.seqno, f->name);
  450. /* Delete the HW fence */
  451. ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
  452. if (ret)
  453. SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
  454. f->base.context, f->base.seqno);
  455. }
  456. static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
  457. {
  458. struct sde_hw_fence_data *data;
  459. int ret = 0;
  460. data = &hw_ctl->hwfence_data;
  461. if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
  462. SDE_EVT32(data->hw_fence_client_id);
  463. ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
  464. if (ret) {
  465. pr_err("failed to reset client %d\n", data->hw_fence_client_id);
  466. return -EINVAL;
  467. }
  468. }
  469. return ret;
  470. }
/**
 * sde_fence_update_input_hw_fence_signal - configure dpu for the input hw-fence
 * ipcc signal, or disable hw-fencing for this ctl path
 * @hw_ctl: hw ctl to configure
 * @debugfs_hw_fence: debugfs mask controlling hw-fence debug features
 * @hw_mdp: mdp top block (used for the input-fence timestamp debug control)
 * @disable: true to disable hw-fencing on this ctl and return -EPERM
 *
 * Return: 0 on success, -EINVAL on missing ops, -EPERM when disabled.
 */
int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
	struct sde_hw_mdp *hw_mdp, bool disable)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_signal_id;
	u32 ipcc_client_id;
	int ctl_id;

	/* we must support sw_override as well, so check both functions */
	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
			!hw_ctl->ops.hw_fence_trigger_sw_override) {
		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;

	if (disable) {
		/* NOTE(review): hw_fence_ctrl is called without a NULL check,
		 * unlike the ops validated above -- confirm it is always
		 * populated whenever this path is reachable
		 */
		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
		return -EPERM;
	}

	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);

	ipcc_signal_id = data->ipcc_in_signal;
	ipcc_client_id = data->ipcc_in_client;

	SDE_DEBUG("configure input signal:%d out client:%d ctl_id:%d\n", ipcc_signal_id,
		ipcc_client_id, ctl_id);
	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id);

	/* configure dpu hw for the client/signal pair signaling input-fence */
	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);

	/* Enable hw-fence for this ctrl-path */
	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);

	return 0;
}
  504. void *sde_sync_get(uint64_t fd)
  505. {
  506. /* force signed compare, fdget accepts an int argument */
  507. return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
  508. }
  509. void sde_sync_put(void *fence)
  510. {
  511. if (fence)
  512. dma_fence_put(fence);
  513. }
  514. void sde_fence_dump(struct dma_fence *fence)
  515. {
  516. char timeline_str[TIMELINE_VAL_LENGTH];
  517. if (fence->ops->timeline_value_str)
  518. fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);
  519. SDE_ERROR(
  520. "fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
  521. fence->ops->get_driver_name(fence),
  522. fence->ops->get_timeline_name(fence),
  523. fence->seqno, timeline_str,
  524. fence->ops->signaled ?
  525. fence->ops->signaled(fence) : 0xffffffff,
  526. dma_fence_get_status(fence), fence->flags);
  527. }
  528. static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
  529. {
  530. struct dma_fence_array *array;
  531. struct dma_fence *user_fence;
  532. int i;
  533. array = container_of(base_fence, struct dma_fence_array, base);
  534. if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
  535. test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
  536. for (i = 0; i < array->num_fences; i++) {
  537. user_fence = array->fences[i];
  538. if (user_fence) {
  539. dma_fence_get(user_fence);
  540. sde_fence_dump(user_fence);
  541. dma_fence_put(user_fence);
  542. }
  543. }
  544. }
  545. }
/**
 * sde_sync_wait - interruptibly wait on a fence with a ms timeout
 * @fnc: fence to wait on (void* to match callers holding opaque handles)
 * @timeout_ms: timeout in milliseconds
 *
 * Return: remaining jiffies on success (>=1 if already signaled), 0 on
 * timeout, -EBADF for a spec-fence bind failure, or another negative errno
 * from the wait.
 */
signed long sde_sync_wait(void *fnc, long timeout_ms)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
		msecs_to_jiffies(timeout_ms));
	/* on timeout (rc==0), invalid wait, or a fence error: diagnose */
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			/* speculative fence-array: distinguish bind failure
			 * from a genuinely signaled/blocked spec fence
			 */
			if (status == -EINVAL) {
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	return rc;
}
  574. uint32_t sde_sync_get_name_prefix(void *fence)
  575. {
  576. const char *name;
  577. uint32_t i, prefix;
  578. struct dma_fence *f = fence;
  579. if (!fence)
  580. return 0;
  581. name = f->ops->get_driver_name(f);
  582. if (!name)
  583. return 0;
  584. prefix = 0x0;
  585. for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
  586. prefix = (prefix << CHAR_BIT) | name[i];
  587. return prefix;
  588. }
  589. static void sde_fence_destroy(struct kref *kref)
  590. {
  591. struct sde_fence_context *ctx;
  592. if (!kref) {
  593. SDE_ERROR("received invalid kref\n");
  594. return;
  595. }
  596. ctx = container_of(kref, struct sde_fence_context, kref);
  597. kfree(ctx);
  598. }
/* container_of helper: map an embedded dma_fence back to its sde_fence */
static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}
  603. static const char *sde_fence_get_driver_name(struct dma_fence *fence)
  604. {
  605. struct sde_fence *f = to_sde_fence(fence);
  606. return f->name;
  607. }
  608. static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
  609. {
  610. struct sde_fence *f = to_sde_fence(fence);
  611. return f->ctx->name;
  612. }
/* dma_fence_ops.enable_signaling: signaling is driven by the timeline
 * done_count (see _sde_fence_trigger), so there is nothing to arm here
 */
static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}
/* dma_fence_ops.signaled: a fence is signaled once the timeline done_count
 * has caught up with (or passed) its seqno
 */
static bool sde_fence_signaled(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);
	bool status;

	/* signed wraparound compare so the check stays correct when the
	 * counters wrap around their unsigned range
	 */
	status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
	SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
		status, fence->seqno, f->ctx->done_count);
	return status;
}
  626. static void sde_fence_release(struct dma_fence *fence)
  627. {
  628. struct sde_fence *f;
  629. if (fence) {
  630. f = to_sde_fence(fence);
  631. /* Delete the HW fence */
  632. if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
  633. _sde_hw_fence_release(f);
  634. kref_put(&f->ctx->kref, sde_fence_destroy);
  635. kfree(f);
  636. }
  637. }
  638. static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
  639. {
  640. if (!fence || !str)
  641. return;
  642. snprintf(str, size, "%llu", fence->seqno);
  643. }
  644. static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
  645. int size)
  646. {
  647. struct sde_fence *f = to_sde_fence(fence);
  648. if (!fence || !f->ctx || !str)
  649. return;
  650. snprintf(str, size, "%d", f->ctx->done_count);
  651. }
/* dma-fence ops for sde sw timeline fences; release also destroys any
 * backing hw-fence (see sde_fence_release)
 */
static struct dma_fence_ops sde_fence_ops = {
	.get_driver_name = sde_fence_get_driver_name,
	.get_timeline_name = sde_fence_get_timeline_name,
	.enable_signaling = sde_fence_enable_signaling,
	.signaled = sde_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = sde_fence_release,
	.fence_value_str = sde_fence_value_str,
	.timeline_value_str = sde_fence_timeline_value_str,
};
/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: sde_fence_context (timeline) to associate the fence with
 * @val: Timeline value at which to signal the fence
 * @hw_ctl: hw ctl to also back the fence with a hw-fence; NULL for sw-only
 * Return: File descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	/* the fence holds a reference on its timeline context */
	kref_get(&ctx->kref);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		/* fence release drops the ctx kref and frees sde_fence */
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* If ctl_id is valid, try to create a hw-fence */
	/* best-effort: on hw-fence failure the sw fence fd is still returned */
	if (hw_ctl)
		sde_fence_create_hw_fence(hw_ctl, sde_fence);

	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}
  716. struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
  717. {
  718. struct sde_fence_context *ctx;
  719. if (!name) {
  720. SDE_ERROR("invalid argument(s)\n");
  721. return ERR_PTR(-EINVAL);
  722. }
  723. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  724. if (!ctx) {
  725. SDE_ERROR("failed to alloc fence ctx\n");
  726. return ERR_PTR(-ENOMEM);
  727. }
  728. strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
  729. ctx->drm_id = drm_id;
  730. kref_init(&ctx->kref);
  731. ctx->context = dma_fence_context_alloc(1);
  732. spin_lock_init(&ctx->lock);
  733. spin_lock_init(&ctx->list_lock);
  734. INIT_LIST_HEAD(&ctx->fence_list_head);
  735. return ctx;
  736. }
  737. void sde_fence_deinit(struct sde_fence_context *ctx)
  738. {
  739. if (!ctx) {
  740. SDE_ERROR("invalid fence\n");
  741. return;
  742. }
  743. kref_put(&ctx->kref, sde_fence_destroy);
  744. }
  745. void sde_fence_prepare(struct sde_fence_context *ctx)
  746. {
  747. unsigned long flags;
  748. if (!ctx) {
  749. SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
  750. } else {
  751. spin_lock_irqsave(&ctx->lock, flags);
  752. ++ctx->commit_count;
  753. spin_unlock_irqrestore(&ctx->lock, flags);
  754. }
  755. }
/*
 * _sde_fence_trigger - walk the tracked fence list and signal every fence
 * that sde_fence_signaled() reports as due, removing it from the list.
 * @ctx: fence timeline context
 * @error: when true, mark each fence with -EBUSY before signalling
 * @ts: timestamp recorded on the signalled fences
 */
static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;

	/* pin ctx: dma_fence_put below can drop a fence's ctx reference */
	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		/* ctx->lock nests inside list_lock; it is the dma_fence lock */
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);
		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			/* drop the list's reference; this may free the fence */
			list_del_init(&fc->fence_list);
			dma_fence_put(&fc->base);
		}
	}
end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}
  784. int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
  785. uint32_t offset, struct sde_hw_ctl *hw_ctl)
  786. {
  787. uint32_t trigger_value;
  788. int fd, rc = -EINVAL;
  789. unsigned long flags;
  790. if (!ctx || !val) {
  791. SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
  792. ctx != NULL, val != NULL);
  793. return rc;
  794. }
  795. /*
  796. * Allow created fences to have a constant offset with respect
  797. * to the timeline. This allows us to delay the fence signalling
  798. * w.r.t. the commit completion (e.g., an offset of +1 would
  799. * cause fences returned during a particular commit to signal
  800. * after an additional delay of one commit, rather than at the
  801. * end of the current one.
  802. */
  803. spin_lock_irqsave(&ctx->lock, flags);
  804. trigger_value = ctx->commit_count + offset;
  805. spin_unlock_irqrestore(&ctx->lock, flags);
  806. fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
  807. *val = fd;
  808. SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
  809. fd, trigger_value, ctx->commit_count, offset);
  810. SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);
  811. rc = (fd >= 0) ? 0 : fd;
  812. return rc;
  813. }
/*
 * sde_fence_signal - advance (or reset) the timeline and trigger due fences
 * @ctx: fence timeline context
 * @ts: timestamp stamped on the signalled fences
 * @fence_event: SDE_FENCE_RESET_TIMELINE fast-forwards done_count up to
 *               commit_count; SDE_FENCE_SIGNAL_ERROR signals with -EBUSY;
 *               any other value performs a normal single-step signal
 * @hw_ctl: on timeline reset, ctl whose hw-fence timeline is also reset
 */
void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* reset hw-fences without error */
		if (hw_ctl)
			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
		/* signed difference keeps the comparison wrap-around safe */
		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
				ctx->drm_id, ctx->done_count, ctx->commit_count);
			/* fast-forward: treat every committed frame as done */
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			/* timeline already caught up; nothing to trigger */
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		/* more signals than commits: log fatal event, leave counts alone */
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	/* signal every tracked fence the timeline has now reached */
	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}
  857. void sde_fence_timeline_status(struct sde_fence_context *ctx,
  858. struct drm_mode_object *drm_obj)
  859. {
  860. char *obj_name;
  861. if (!ctx || !drm_obj) {
  862. SDE_ERROR("invalid input params\n");
  863. return;
  864. }
  865. switch (drm_obj->type) {
  866. case DRM_MODE_OBJECT_CRTC:
  867. obj_name = "crtc";
  868. break;
  869. case DRM_MODE_OBJECT_CONNECTOR:
  870. obj_name = "connector";
  871. break;
  872. default:
  873. obj_name = "unknown";
  874. break;
  875. }
  876. SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  877. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  878. ctx->commit_count);
  879. }
  880. void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
  881. {
  882. char timeline_str[TIMELINE_VAL_LENGTH];
  883. if (fence->ops->timeline_value_str)
  884. fence->ops->timeline_value_str(fence,
  885. timeline_str, TIMELINE_VAL_LENGTH);
  886. seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
  887. fence->ops->get_driver_name(fence),
  888. fence->ops->get_timeline_name(fence),
  889. fence->seqno, timeline_str,
  890. fence->ops->signaled ?
  891. fence->ops->signaled(fence) : 0xffffffff);
  892. }
  893. void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
  894. struct drm_mode_object *drm_obj, struct seq_file **s)
  895. {
  896. char *obj_name;
  897. struct sde_fence *fc, *next;
  898. struct dma_fence *fence;
  899. if (!ctx || !drm_obj) {
  900. SDE_ERROR("invalid input params\n");
  901. return;
  902. }
  903. switch (drm_obj->type) {
  904. case DRM_MODE_OBJECT_CRTC:
  905. obj_name = "crtc";
  906. break;
  907. case DRM_MODE_OBJECT_CONNECTOR:
  908. obj_name = "connector";
  909. break;
  910. default:
  911. obj_name = "unknown";
  912. break;
  913. }
  914. seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
  915. obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
  916. ctx->commit_count);
  917. spin_lock(&ctx->list_lock);
  918. list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
  919. fence = &fc->base;
  920. sde_fence_list_dump(fence, s);
  921. }
  922. spin_unlock(&ctx->list_lock);
  923. }