// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sync_file.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include "msm_drv.h"
#include "sde_kms.h"
#include "sde_fence.h"
#include "sde_encoder.h"

#define TIMELINE_VAL_LENGTH		128
#define SPEC_FENCE_FLAG_FENCE_ARRAY	0x10
#define SPEC_FENCE_FLAG_ARRAY_BIND	0x11

#define HW_FENCE_DIR_WRITE_SIZE		0x2
#define HW_FENCE_DIR_WRITE_MASK		0xFFFFFFFF
#define HW_FENCE_HFI_MMAP_DPU_BA	0x200000

/**
 * struct sde_fence - release/retire fence structure
 * @base: base fence structure
 * @ctx: fence context
 * @name: name of each fence; it is the fence timeline plus the commit_count
 * @fence_list: list entry linking this fence to its timeline/context
 * @fd: fd attached to this fence, kept for debugging purposes
 * @hwfence_out_ctl: hw ctl for the output fence
 * @hwfence_index: hw fence index for this fence
 * @txq_updated_fence: flag to indicate that a fence has been updated in txq
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
	struct sde_hw_ctl *hwfence_out_ctl;
	u64 hwfence_index;
	bool txq_updated_fence;
};

/**
 * enum sde_hw_fence_clients - sde clients for the hw-fence feature
 *
 * Do not modify the order of this enum or add more elements without also
 * modifying/adding the corresponding entries in the 'hw_fence_data' tables.
 */
enum sde_hw_fence_clients {
	SDE_HW_FENCE_CLIENT_CTL_0,
	SDE_HW_FENCE_CLIENT_CTL_1,
	SDE_HW_FENCE_CLIENT_CTL_2,
	SDE_HW_FENCE_CLIENT_CTL_3,
	SDE_HW_FENCE_CLIENT_CTL_4,
	SDE_HW_FENCE_CLIENT_CTL_5,
	SDE_HW_FENCE_CLIENT_MAX,
};

/**
 * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals that each
 * display client uses to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum, and the
 * output signals must match the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_no_dpu' table must be used for HW that does not support dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 14, {2, 3},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 15, {4, 5},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 16, {6, 7},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 17, {8, 9},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 18, {10, 11},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 19, {12, 13},
		0, 8, 8, 0, 0}
};

/**
 * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output signals that each
 * display client uses to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum, and the
 * output signals must match the signals that FenceCTL expects for each display client.
 * This 'hw_fence_data_dpu_client' table must be used for HW that supports dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 0, {0, 6},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 1, {1, 7},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 2, {2, 8},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 3, {3, 9},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 4, {4, 10},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 5, {5, 11},
		0, 8, 25, 0, 0}
};

void msm_hw_fence_error_cb(u32 handle, int error, void *cb_data)
{
	struct msm_hw_fence_cb_data *msm_hw_fence_cb_data;
	struct sde_hw_fence_error_cb_data *sde_hw_fence_error_data;

	SDE_EVT32(handle, error, SDE_EVTLOG_FUNC_ENTRY);

	msm_hw_fence_cb_data = (struct msm_hw_fence_cb_data *)cb_data;
	if (!msm_hw_fence_cb_data) {
		SDE_ERROR("msm hw fence cb data is NULL.\n");
		SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, SDE_EVTLOG_ERROR);
		return;
	}

	sde_hw_fence_error_data = (struct sde_hw_fence_error_cb_data *)(msm_hw_fence_cb_data->data);
	if (!sde_hw_fence_error_data) {
		SDE_ERROR("sde hw fence cb data is NULL.\n");
		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, SDE_EVTLOG_ERROR);
		return;
	}

	sde_encoder_handle_hw_fence_error(sde_hw_fence_error_data->ctl_idx,
		sde_hw_fence_error_data->sde_kms, handle, error);
}

int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, struct sde_kms *sde_kms, bool use_dpu_ipcc,
		struct msm_mmu *mmu)
{
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header_va, *hfi_queue_header_pa;
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct sde_hw_fence_data *sde_hw_fence_data;
	struct sde_hw_fence_data *hwfence_data;
	struct sde_hw_fence_error_cb_data *sde_hw_fence_error_cb_data;
	phys_addr_t queue_pa;
	void *queue_va;
	u32 qhdr0_offset, ctl_hfi_iova;
	int ctl_id, ret;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
		SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	hwfence_data = &hw_ctl->hwfence_data;
	sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;

	if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
		SDE_ERROR("Unexpected client_id:%d for ctl_id:%d\n",
			sde_hw_fence_data[ctl_id].client_id, ctl_id);
		return -EINVAL;
	}

	/* init the default fence-data for this client */
	memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));

	SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
	hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
		&hwfence_data->mem_descriptor);
	hwfence_data->dma_context = dma_fence_context_alloc(1);

	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
		hwfence_data->hw_fence_handle = NULL;
		SDE_DEBUG("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
			hwfence_data->hw_fence_client_id);
		return -EINVAL;
	}

	sde_hw_fence_error_cb_data = &(hwfence_data->sde_hw_fence_error_cb_data);
	sde_hw_fence_error_cb_data->ctl_idx = hw_ctl->idx;
	sde_hw_fence_error_cb_data->sde_kms = sde_kms;
	ret = msm_hw_fence_register_error_cb(hwfence_data->hw_fence_handle,
		msm_hw_fence_error_cb, (void *)sde_hw_fence_error_cb_data);
	if (ret) {
		SDE_EVT32(hw_ctl->idx, SDE_EVTLOG_ERROR);
		SDE_DEBUG("hw fence cb register failed. ret = %d\n", ret);
	}

	/* one-to-one memory map of ctl-path client queues */
	ctl_hfi_iova = HW_FENCE_HFI_MMAP_DPU_BA +
		PAGE_ALIGN(hwfence_data->mem_descriptor.size * ctl_id);
	ret = mmu->funcs->one_to_one_map(mmu, ctl_hfi_iova,
		hwfence_data->mem_descriptor.device_addr,
		hwfence_data->mem_descriptor.size, IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		SDE_ERROR("queue one2one memory smmu map failed, ret:%d ctl_id:%d, client:%d\n",
			ret, ctl_id, hwfence_data->hw_fence_client_id);
		return ret;
	}

	/* get queue header offset */
	queue_va = hwfence_data->mem_descriptor.virtual_addr;
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)queue_va;
	qhdr0_offset = hfi_table_header->qhdr0_offset;

	/* initialize tx_wm pointer */
	hfi_queue_header_va = (struct msm_hw_fence_hfi_queue_header *)(queue_va + qhdr0_offset);
	hwfence_data->txq_tx_wm_va = &hfi_queue_header_va->tx_wm;

	/* initialize txq wr_ptr addr pointer */
	queue_pa = ctl_hfi_iova;
	hfi_queue_header_pa = (struct msm_hw_fence_hfi_queue_header *)(queue_pa + qhdr0_offset);
	hwfence_data->txq_wr_ptr_pa = &hfi_queue_header_pa->write_index;

	SDE_DEBUG("hwfence registered ctl:%d client:%d handle:0x%pK tx_wm:0x%x wr_idx:0x%x\n",
		ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle,
		*hwfence_data->txq_tx_wm_va, *hwfence_data->txq_wr_ptr_pa);

	return 0;
}

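/*
 * Usage sketch (illustrative only, not part of this driver): a KMS driver
 * would typically pair these init/deinit calls per CTL when hw-fencing is
 * supported. The 'ctl', 'catalog' and 'mmu' names below are assumptions for
 * the example, not identifiers from this file.
 *
 *	if (has_hw_fences) {
 *		ret = sde_hw_fence_init(ctl, sde_kms, catalog->has_dpu_ipcc, mmu);
 *		if (ret)
 *			SDE_DEBUG("hw fences unavailable for ctl:%d\n", ctl->idx);
 *	}
 *	...
 *	sde_hw_fence_deinit(ctl);	// safe even if registration failed
 */
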
void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
{
	struct sde_hw_fence_data *hwfence_data;

	if (!hw_ctl)
		return;

	hwfence_data = &hw_ctl->hwfence_data;

	/* client was not registered */
	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
		return;

	SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
		hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);

	msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
	hwfence_data->hw_fence_handle = NULL;
}

static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
{
	struct sde_hw_fence_data *data;
	struct msm_hw_fence_create_params params;
	int ctl_id;
	u64 hwfence_index;
	int ret;

	if (!hw_ctl)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	params.fence = &sde_fence->base;
	params.handle = &hwfence_index;

	/* Create the HW fence */
	ret = msm_hw_fence_create(data->hw_fence_handle, &params);
	if (ret) {
		SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
			sde_fence->base.context, sde_fence->base.seqno);
	} else {
		/* store ctl and index for this fence */
		sde_fence->hwfence_out_ctl = hw_ctl;
		sde_fence->hwfence_index = hwfence_index;

		SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
			sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
			sde_fence->base.seqno, sde_fence->name);
	}

	return ret;
}

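/*
 * Note: this helper is only invoked from _sde_fence_create_fd() below, and
 * only when the caller passes a hw_ctl. So every output fence created through
 * sde_fence_create() with a valid ctl also gets a backing hw-fence, and the
 * ctl/index stored here are later consumed by the txq-update and release paths.
 */
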
static inline char *_get_client_id_name(int hw_fence_client_id)
{
	switch (hw_fence_client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		return "HW_FENCE_CLIENT_ID_CTX0";
	case HW_FENCE_CLIENT_ID_CTL0:
		return "HW_FENCE_CLIENT_ID_CTL0";
	case HW_FENCE_CLIENT_ID_CTL1:
		return "HW_FENCE_CLIENT_ID_CTL1";
	case HW_FENCE_CLIENT_ID_CTL2:
		return "HW_FENCE_CLIENT_ID_CTL2";
	case HW_FENCE_CLIENT_ID_CTL3:
		return "HW_FENCE_CLIENT_ID_CTL3";
	case HW_FENCE_CLIENT_ID_CTL4:
		return "HW_FENCE_CLIENT_ID_CTL4";
	case HW_FENCE_CLIENT_ID_CTL5:
		return "HW_FENCE_CLIENT_ID_CTL5";
	default:
		return "Unknown";
	}
}

static void _cleanup_fences_refcount(struct dma_fence **fences, u32 num_fences)
{
	int i;

	for (i = 0; i < num_fences; i++)
		dma_fence_put(fences[i]);
}

int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
		u32 num_fences)
{
	struct sde_hw_fence_data *data;
	int i, j, ret;
	int ctl_id;
	struct dma_fence_array *temp_array = NULL;
	struct dma_fence *base_fence;
	struct dma_fence **hw_fences;
	u32 num_hw_fences;
	struct dma_fence **fence_list;
	struct dma_fence_array *array = NULL;
	int array_childs = 0;
	int array_count = 0;
	int fence_list_index = 0;
	u64 seqno;

	if (!hw_ctl) {
		SDE_ERROR("wrong ctl\n");
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
		num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));

	for (i = 0; i < num_fences; i++) {
		/* get a refcount for each of the fences */
		dma_fence_get(fences[i]);

		if (dma_fence_is_array(fences[i])) {
			array_count++;
			array = container_of(fences[i], struct dma_fence_array, base);
			array_childs += array->num_fences;
		}

		SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
			(fences[i])->context, (fences[i])->seqno);
	}

	if (num_fences > 1) {
		/* fence_list memory is freed during fence-array release */
		fence_list = kzalloc(((num_fences - array_count) + array_childs)
				* (sizeof(struct dma_fence *)), GFP_KERNEL);
		if (!fence_list) {
			_cleanup_fences_refcount(fences, num_fences);
			return -EINVAL;
		}

		/* populate fence_list with the fences */
		for (i = 0; i < num_fences; i++) {
			if (dma_fence_is_array(fences[i])) {
				array = container_of(fences[i], struct dma_fence_array, base);
				for (j = 0; j < array->num_fences; j++) {
					/* get a refcount for each of the child fences */
					dma_fence_get(array->fences[j]);
					fence_list[fence_list_index++] = array->fences[j];
				}

				if (array->num_fences) /* print the first fence from array */
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_H32(array->fences[0]->context),
						SDE_EVTLOG_L32(array->fences[0]->context),
						SDE_EVTLOG_H32(array->fences[0]->seqno),
						SDE_EVTLOG_L32(array->fences[0]->seqno));
				else
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_ERROR);

				/* remove refcount on parent */
				dma_fence_put(fences[i]);
			} else {
				fence_list[fence_list_index++] = fences[i];
				SDE_EVT32(ctl_id, num_fences, i, SDE_EVTLOG_H32(fences[i]->context),
					SDE_EVTLOG_L32(fences[i]->context),
					SDE_EVTLOG_H32(fences[i]->seqno),
					SDE_EVTLOG_L32(fences[i]->seqno));
			}
		}

		seqno = data->hw_fence_array_seqno++;
		temp_array = dma_fence_array_create(fence_list_index, fence_list,
			data->dma_context, seqno, 0);
		if (!temp_array) {
			SDE_ERROR("unable to create fence array, can't register for wait\n");
			_cleanup_fences_refcount(fences, num_fences);
			kfree(fence_list);
			return -EINVAL;
		}

		SDE_EVT32(ctl_id, fence_list_index, SDE_EVTLOG_H32(data->dma_context),
			SDE_EVTLOG_L32(data->dma_context), SDE_EVTLOG_H32(seqno),
			SDE_EVTLOG_L32(seqno));

		base_fence = &temp_array->base;
		hw_fences = &base_fence;
		num_hw_fences = 1;
	} else {
		struct dma_fence_array *tmp_array;

		hw_fences = fences;
		num_hw_fences = num_fences;
		tmp_array = dma_fence_is_array(fences[0]) ?
			container_of(fences[0], struct dma_fence_array, base) :
			NULL;
		SDE_EVT32(ctl_id, num_hw_fences, SDE_EVTLOG_H32(fences[0]->context),
			SDE_EVTLOG_L32(fences[0]->context), SDE_EVTLOG_H32(fences[0]->seqno),
			SDE_EVTLOG_L32(fences[0]->seqno), fences[0]->flags,
			tmp_array ? tmp_array->num_fences : SDE_EVTLOG_FUNC_CASE2);
	}

	/* register for wait */
	ret = msm_hw_fence_wait_update(data->hw_fence_handle, hw_fences, num_hw_fences, true);
	if (ret)
		SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);

	/* fence-array put will release each individual extra refcount during array release */
	if (temp_array)
		dma_fence_put(&temp_array->base);
	else
		dma_fence_put(fences[0]);

	SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);

	return ret;
}

static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, bool vid_mode, u32 line_count,
		u32 debugfs_hw_fence)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_out_signal;
	int ctl_id;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
			!hw_ctl->ops.hw_fence_update_output_fence) {
		SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
		/* This should not happen; review the ping-pong calculation */
		SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
			MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
		return -EINVAL;
	}

	ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
	data->ipcc_out_signal_pp_idx = (data->ipcc_out_signal_pp_idx + 1) %
		MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG;

	SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
		ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));

	if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
			hw_ctl->ops.hw_fence_output_timestamp_ctrl)
		hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);

	/* update client/signal output fence */
	hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
	SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);

	/* arm dpu to trigger output fence signal once ready */
	if (line_count)
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
			HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
	else if (vid_mode && (hw_ctl->caps->features & BIT(SDE_CTL_HW_FENCE_TRIGGER_SEL)))
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_VID_MODE);
	else
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CMD_MODE);

	return 0;
}

static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, bool vid_mode,
		u32 line_count, u32 debugfs_hw_fence)
{
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_fence *fc, *next;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as a hw-fence, but the hw ctl
			 * to handle it is not available. This should not happen, but if
			 * it does, it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
				fence->context, fence->seqno);
			spin_unlock(&ctx->list_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
	if (hw_ctl)
		_arm_output_hw_fence(hw_ctl, vid_mode, line_count, debugfs_hw_fence);

	return 0;
}

void sde_fence_output_hw_fence_dir_write_init(struct sde_hw_ctl *hw_ctl)
{
	if (hw_ctl && hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		hw_ctl->ops.hw_fence_output_fence_dir_write_init(hw_ctl,
			hw_ctl->hwfence_data.txq_wr_ptr_pa, HW_FENCE_DIR_WRITE_SIZE,
			HW_FENCE_DIR_WRITE_MASK);
}

/* update output hw_fences txq */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
		u32 debugfs_hw_fence)
{
	int ret = 0;
	struct sde_hw_fence_data *data;
	struct sde_fence *fc, *next;
	struct sde_hw_ctl *hw_ctl = NULL;
	int ctl_id;
	bool txq_updated = false;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
				fc->txq_updated_fence)
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as a hw-fence, but the hw ctl
			 * to handle it is not available. This should not happen, but if
			 * it does, it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
				fence->seqno, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		ctl_id = hw_ctl->idx - CTL_0;
		data = &hw_ctl->hwfence_data;
		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
			SDE_ERROR("unexpected handle for ctl_id:%d, this can cause a fence-timeout\n",
				ctl_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		/* update hw-fence tx queue */
		SDE_EVT32(ctl_id, SDE_EVTLOG_H32(fc->hwfence_index),
			SDE_EVTLOG_L32(fc->hwfence_index), *data->txq_tx_wm_va);
		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
		if (ret) {
			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
				fc->hwfence_index, fence->context, fence->seqno,
				data->hw_fence_client_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			goto exit;
		}

		/* update hw-fence tx queue wr_idx data */
		if (hw_ctl->ops.hw_fence_output_fence_dir_write_data)
			hw_ctl->ops.hw_fence_output_fence_dir_write_data(hw_ctl,
				*data->txq_tx_wm_va);

		/* avoid updating txq more than once and avoid repeating the same fence twice */
		txq_updated = fc->txq_updated_fence = true;

		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%llx ctl:%d vid:%d\n",
			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);

		/* We will update TxQ one time per frame */
		if (txq_updated)
			break;
	}

exit:
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
	if ((txq_updated && hw_ctl) || line_count)
		_sde_fence_arm_output_hw_fence(ctx, vid_mode, line_count, debugfs_hw_fence);

	return ret;
}

static void _sde_hw_fence_release(struct sde_fence *f)
{
	struct sde_hw_fence_data *data;
	struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
	int ctl_id;
	int ret;

	if (!hw_ctl) {
		SDE_ERROR("invalid hw_ctl\n");
		return;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return;
	}

	SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
		ctl_id, f->base.context, f->base.seqno, f->name);

	/* Delete the HW fence */
	ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
	if (ret)
		SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
			f->base.context, f->base.seqno);
}

static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
{
	struct sde_hw_fence_data *data;
	int ret = 0;

	data = &hw_ctl->hwfence_data;

	if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_EVT32(data->hw_fence_client_id);
		ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
		if (ret) {
			pr_err("failed to reset client %d\n", data->hw_fence_client_id);
			return -EINVAL;
		}
	}

	return ret;
}

int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
		struct sde_hw_mdp *hw_mdp, bool disable)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_signal_id;
	u32 ipcc_client_id;
	int ctl_id;
	u64 qtime;

	/* we must support sw_override as well, so check both functions */
	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
			!hw_ctl->ops.hw_fence_trigger_sw_override) {
		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;

	if (disable) {
		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
		return -EPERM;
	}

	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);

	ipcc_signal_id = data->ipcc_in_signal;
	ipcc_client_id = data->ipcc_in_client;

	SDE_DEBUG("configure input signal:%d out client:%d ctl_id:%d\n", ipcc_signal_id,
		ipcc_client_id, ctl_id);

	/* configure dpu hw for the client/signal pair signaling input-fence */
	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);

	/* Enable hw-fence for this ctrl-path */
	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);

	qtime = arch_timer_read_counter();
	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id, SDE_EVTLOG_H32(qtime),
		SDE_EVTLOG_L32(qtime));

	return 0;
}

void sde_fence_error_ctx_update(struct sde_fence_context *ctx, int input_fence_status,
		enum sde_fence_error_state sde_fence_error_state)
{
	if (!ctx) {
		SDE_DEBUG("invalid fence\n");
		SDE_EVT32(input_fence_status, sde_fence_error_state, SDE_EVTLOG_ERROR);
		return;
	}

	ctx->sde_fence_error_ctx.fence_error_status = input_fence_status;
	ctx->sde_fence_error_ctx.fence_error_state = sde_fence_error_state;
}

void *sde_sync_get(uint64_t fd)
{
	/* force signed compare, fdget accepts an int argument */
	return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
}

void sde_sync_put(void *fence)
{
	if (fence)
		dma_fence_put(fence);
}

void sde_fence_dump(struct dma_fence *fence)
{
	char timeline_str[TIMELINE_VAL_LENGTH];

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);

	SDE_ERROR(
		"fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff,
		dma_fence_get_status(fence), fence->flags);
}

static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
{
	struct dma_fence_array *array;
	struct dma_fence *user_fence;
	int i;

	array = container_of(base_fence, struct dma_fence_array, base);
	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
			test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
		for (i = 0; i < array->num_fences; i++) {
			user_fence = array->fences[i];
			if (user_fence) {
				dma_fence_get(user_fence);
				sde_fence_dump(user_fence);
				dma_fence_put(user_fence);
			}
		}
	}
}

signed long sde_sync_wait(void *fnc, long timeout_ms, int *error_status)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
		msecs_to_jiffies(timeout_ms));
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			if (status == -EINVAL) {
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	if (error_status)
		*error_status = fence->error;

	return rc;
}

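/*
 * Usage sketch (illustrative only): waiting on an acquire-fence fd handed in
 * from userspace. The 'acq_fd' name is an assumption for the example; only
 * the helpers defined above are used.
 *
 *	struct dma_fence *input_fence = sde_sync_get(acq_fd);
 *	int error = 0;
 *
 *	if (input_fence) {
 *		long rc = sde_sync_wait(input_fence, 1000, &error);
 *
 *		if (rc <= 0)
 *			SDE_ERROR("fence wait failed rc:%ld error:%d\n", rc, error);
 *		sde_sync_put(input_fence);
 *	}
 */
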
uint32_t sde_sync_get_name_prefix(void *fence)
{
	const char *name;
	uint32_t i, prefix;
	struct dma_fence *f = fence;

	if (!fence)
		return 0;

	name = f->ops->get_driver_name(f);
	if (!name)
		return 0;

	prefix = 0x0;
	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
		prefix = (prefix << CHAR_BIT) | name[i];

	return prefix;
}

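/*
 * Worked example: the loop above packs up to the first four bytes of the
 * driver name, big-endian style, into a u32. For a driver name of "sde":
 * 's' (0x73) -> 0x73, then 'd' (0x64) -> 0x7364, then 'e' (0x65) -> 0x736465.
 */
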
static void sde_fence_destroy(struct kref *kref)
{
	struct sde_fence_context *ctx;

	if (!kref) {
		SDE_ERROR("received invalid kref\n");
		return;
	}

	ctx = container_of(kref, struct sde_fence_context, kref);
	kfree(ctx);
}

static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}

static const char *sde_fence_get_driver_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->name;
}

static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->ctx->name;
}

static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

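/*
 * Note on the comparison below: casting the (seqno - done_count) difference
 * to a signed int keeps the check correct across timeline wraparound. For
 * example, with seqno == 3 and done_count == 5, (int)(3 - 5) == -2 <= 0, so
 * the fence correctly reads as signaled.
 */
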
static bool sde_fence_signaled(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);
	bool status;

	status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
	SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
		status, fence->seqno, f->ctx->done_count);

	return status;
}

static void sde_fence_release(struct dma_fence *fence)
{
	struct sde_fence *f;

	if (fence) {
		f = to_sde_fence(fence);

		/* Delete the HW fence */
		if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			_sde_hw_fence_release(f);

		kref_put(&f->ctx->kref, sde_fence_destroy);
		kfree(f);
	}
}

static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
{
	if (!fence || !str)
		return;

	snprintf(str, size, "%llu", fence->seqno);
}

static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
		int size)
{
	struct sde_fence *f = to_sde_fence(fence);

	if (!fence || !f->ctx || !str)
		return;

	snprintf(str, size, "%d", f->ctx->done_count);
}

static struct dma_fence_ops sde_fence_ops = {
	.get_driver_name = sde_fence_get_driver_name,
	.get_timeline_name = sde_fence_get_timeline_name,
	.enable_signaling = sde_fence_enable_signaling,
	.signaled = sde_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = sde_fence_release,
	.fence_value_str = sde_fence_value_str,
	.timeline_value_str = sde_fence_timeline_value_str,
};

/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: Fence context (timeline) to associate with the fence
 * @val: Timeline value at which to signal the fence
 * @hw_ctl: hw ctl used to create a backing hw-fence; may be NULL
 * Return: File descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	kref_get(&ctx->kref);
	ctx->sde_fence_error_ctx.curr_frame_fence_seqno = val;

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* If hw_ctl is valid, try to create a hw-fence */
	if (hw_ctl)
		sde_fence_create_hw_fence(hw_ctl, sde_fence);

	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}

struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
{
	struct sde_fence_context *ctx;

	if (!name) {
		SDE_ERROR("invalid argument(s)\n");
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDE_ERROR("failed to alloc fence ctx\n");
		return ERR_PTR(-ENOMEM);
	}

	strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
	ctx->drm_id = drm_id;
	kref_init(&ctx->kref);
	ctx->context = dma_fence_context_alloc(1);

	spin_lock_init(&ctx->lock);
	spin_lock_init(&ctx->list_lock);
	INIT_LIST_HEAD(&ctx->fence_list_head);

	return ctx;
}

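/*
 * Lifecycle sketch (illustrative only): a crtc/connector typically owns one
 * context per timeline and drives it as below. The 'retire_ctx' name and the
 * SDE_FENCE_SIGNAL event value are assumptions for the example (the normal
 * signal event is defined in sde_fence.h, alongside SDE_FENCE_RESET_TIMELINE
 * and SDE_FENCE_SIGNAL_ERROR used elsewhere in this file).
 *
 *	struct sde_fence_context *retire_ctx = sde_fence_init("crtc:0-retire", drm_id);
 *	uint64_t val;
 *	...
 *	sde_fence_prepare(retire_ctx);			// at commit: ++commit_count
 *	sde_fence_create(retire_ctx, &val, 0, NULL);	// hand an fd back to userspace
 *	...
 *	sde_fence_signal(retire_ctx, ktime_get(), SDE_FENCE_SIGNAL, NULL);
 *	...
 *	sde_fence_deinit(retire_ctx);			// drops the initial kref
 */
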
void sde_fence_deinit(struct sde_fence_context *ctx)
{
	if (!ctx) {
		SDE_ERROR("invalid fence\n");
		return;
	}

	kref_put(&ctx->kref, sde_fence_destroy);
}

void sde_fence_prepare(struct sde_fence_context *ctx)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
	} else {
		spin_lock_irqsave(&ctx->lock, flags);
		++ctx->commit_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
}

static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;
	enum sde_fence_error_state fence_error_state = 0;
	struct sde_fence_error_ctx *fence_error_ctx;

	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	fence_error_ctx = &ctx->sde_fence_error_ctx;

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);

		fence_error_state = fence_error_ctx->fence_error_state;
		if (fence_error_state) {
			if (fence_error_state == HANDLE_OUT_OF_ORDER &&
					fence_error_ctx->last_good_frame_fence_seqno == fc->base.seqno) {
				SDE_EVT32(fence_error_ctx->last_good_frame_fence_seqno,
					fence_error_state, SDE_EVTLOG_FUNC_CASE1);
				spin_unlock_irqrestore(&ctx->lock, flags);
				continue;
			} else if (((fence_error_state == HANDLE_OUT_OF_ORDER) ||
					(fence_error_state == SET_ERROR_ONLY_VID) ||
					(fence_error_state == SET_ERROR_ONLY_CMD_RELEASE))
					&& (fence_error_ctx->fence_error_status < 0)) {
				dma_fence_set_error(&fc->base, fence_error_ctx->fence_error_status);
				dma_fence_signal_timestamp_locked(&fc->base, ts);
				spin_unlock_irqrestore(&ctx->lock, flags);
				SDE_EVT32(fence_error_state, fence_error_ctx->fence_error_status,
					ktime_to_us(ts), fc->base.seqno, SDE_EVTLOG_FUNC_CASE2);
				list_del_init(&fc->fence_list);
				dma_fence_put(&fc->base);
				continue;
			}
		}

		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);

		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			list_del_init(&fc->fence_list);
			dma_fence_put(&fc->base);
		}
	}
end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}

int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
		uint32_t offset, struct sde_hw_ctl *hw_ctl)
{
	uint32_t trigger_value;
	int fd, rc = -EINVAL;
	unsigned long flags;

	if (!ctx || !val) {
		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
			ctx != NULL, val != NULL);
		return rc;
	}

	/*
	 * Allow created fences to have a constant offset with respect
	 * to the timeline. This allows us to delay the fence signalling
	 * w.r.t. the commit completion (e.g., an offset of +1 would
	 * cause fences returned during a particular commit to signal
	 * after an additional delay of one commit, rather than at the
	 * end of the current one).
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	trigger_value = ctx->commit_count + offset;
	spin_unlock_irqrestore(&ctx->lock, flags);

	fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
	*val = fd;
	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
		fd, trigger_value, ctx->commit_count, offset);
	SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);

	rc = (fd >= 0) ? 0 : fd;

	return rc;
}

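/*
 * Worked example for the offset above: with commit_count == 5 and offset == 1,
 * the new fence gets seqno 6, so sde_fence_signaled() only reports it as
 * signaled once done_count reaches 6, i.e. one commit after the current one
 * retires.
 */
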
void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* reset hw-fences without error */
		if (hw_ctl)
			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);

		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
				ctx->drm_id, ctx->done_count, ctx->commit_count);
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}

void sde_fence_timeline_status(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj)
{
	char *obj_name;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);
}

void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
{
	char timeline_str[TIMELINE_VAL_LENGTH];

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence,
			timeline_str, TIMELINE_VAL_LENGTH);

	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff);
}

void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj, struct seq_file **s)
{
	char *obj_name;
	struct sde_fence *fc, *next;
	struct dma_fence *fence;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);

	spin_lock(&ctx->list_lock);
	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		fence = &fc->base;
		sde_fence_list_dump(fence, s);
	}
	spin_unlock(&ctx->list_lock);
}