// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sync_file.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include "msm_drv.h"
#include "sde_kms.h"
#include "sde_fence.h"
#include "sde_encoder.h"

#define TIMELINE_VAL_LENGTH		128
#define SPEC_FENCE_FLAG_FENCE_ARRAY	0x10
#define SPEC_FENCE_FLAG_ARRAY_BIND	0x11
#define HW_FENCE_DIR_WRITE_SIZE		0x2
#define HW_FENCE_DIR_WRITE_MASK		0xFFFFFFFF
#define HW_FENCE_HFI_MMAP_DPU_BA	0x200000

/**
 * struct sde_fence - release/retire fence structure
 * @base: base fence structure
 * @ctx: fence context
 * @name: fence name; composed of the fence timeline name + commit_count
 * @fence_list: list used to associate this fence with its timeline/context
 * @fd: fd attached to this fence - for debugging purposes
 * @hwfence_out_ctl: hw ctl for the output fence
 * @hwfence_index: hw fence index for this fence
 * @txq_updated_fence: flag to indicate that this fence has been updated in the txq
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
	struct sde_hw_ctl *hwfence_out_ctl;
	u64 hwfence_index;
	bool txq_updated_fence;
};

/**
 * enum sde_hw_fence_clients - sde clients for the hw-fence feature
 *
 * Do not modify the order of this enum and/or add more elements
 * without modifying/adding the corresponding fields in the
 * 'hw_fence_data' structs.
 */
enum sde_hw_fence_clients {
	SDE_HW_FENCE_CLIENT_CTL_0,
	SDE_HW_FENCE_CLIENT_CTL_1,
	SDE_HW_FENCE_CLIENT_CTL_2,
	SDE_HW_FENCE_CLIENT_CTL_3,
	SDE_HW_FENCE_CLIENT_CTL_4,
	SDE_HW_FENCE_CLIENT_CTL_5,
	SDE_HW_FENCE_CLIENT_MAX,
};

/**
 * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals for each display
 * client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * and the output signal must match the signals that the FenceCTL expects for each display client.
 * This 'hw_fence_data_no_dpu' table must be used for HW that does not support dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 14, {2, 3},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 15, {4, 5},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 16, {6, 7},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 17, {8, 9},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 18, {10, 11},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 19, {12, 13},
		0, 8, 8, 0, 0}
};

/**
 * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output signals for each display
 * client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum,
 * and the output signal must match the signals that the FenceCTL expects for each display client.
 * This 'hw_fence_data_dpu_client' table must be used for HW that supports dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 0, {0, 6},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 1, {1, 7},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 2, {2, 8},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 3, {3, 9},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 4, {4, 10},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 5, {5, 11},
		0, 8, 25, 0, 0}
};
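
/*
 * msm_hw_fence_error_cb - error callback registered with the msm hw-fence
 * driver. Validates the callback payload and forwards the error to the
 * encoder through sde_encoder_handle_hw_fence_error().
 */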
void msm_hw_fence_error_cb(u32 handle, int error, void *cb_data)
{
	struct msm_hw_fence_cb_data *msm_hw_fence_cb_data;
	struct sde_hw_fence_error_cb_data *sde_hw_fence_error_data;

	SDE_EVT32(handle, error, SDE_EVTLOG_FUNC_ENTRY);

	msm_hw_fence_cb_data = (struct msm_hw_fence_cb_data *)cb_data;
	if (!msm_hw_fence_cb_data) {
		SDE_ERROR("msm hw fence cb data is NULL.\n");
		SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, SDE_EVTLOG_ERROR);
		return;
	}

	sde_hw_fence_error_data = (struct sde_hw_fence_error_cb_data *)(msm_hw_fence_cb_data->data);
	if (!sde_hw_fence_error_data) {
		SDE_ERROR("sde hw fence cb data is NULL.\n");
		SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, SDE_EVTLOG_ERROR);
		return;
	}

	sde_encoder_handle_hw_fence_error(sde_hw_fence_error_data->ctl_idx,
		sde_hw_fence_error_data->sde_kms, handle, error);
}
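
/*
 * sde_hw_fence_init - registers this ctl path as a client of the hw-fence
 * driver, creates a one-to-one smmu mapping for the client's HFI queues,
 * and caches the tx queue watermark and write-index addresses used later
 * for the output-fence dir-write programming.
 */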
int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, struct sde_kms *sde_kms, bool use_dpu_ipcc,
		struct msm_mmu *mmu)
{
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header_va, *hfi_queue_header_pa;
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct sde_hw_fence_data *sde_hw_fence_data;
	struct sde_hw_fence_data *hwfence_data;
	struct sde_hw_fence_error_cb_data *sde_hw_fence_error_cb_data;
	phys_addr_t queue_pa;
	void *queue_va;
	u32 qhdr0_offset, ctl_hfi_iova;
	int ctl_id, ret;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
		SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	hwfence_data = &hw_ctl->hwfence_data;
	sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;

	if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
		SDE_ERROR("unexpected client_id:%d for ctl_id:%d\n",
			sde_hw_fence_data[ctl_id].client_id, ctl_id);
		return -EINVAL;
	}

	/* init the default fence-data for this client */
	memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));

	SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
	hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
		&hwfence_data->mem_descriptor);
	hwfence_data->dma_context = dma_fence_context_alloc(1);
	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
		hwfence_data->hw_fence_handle = NULL;
		SDE_DEBUG("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
			hwfence_data->hw_fence_client_id);
		return -EINVAL;
	}

	sde_hw_fence_error_cb_data = &(hwfence_data->sde_hw_fence_error_cb_data);
	sde_hw_fence_error_cb_data->ctl_idx = hw_ctl->idx;
	sde_hw_fence_error_cb_data->sde_kms = sde_kms;
	ret = msm_hw_fence_register_error_cb(hwfence_data->hw_fence_handle,
		msm_hw_fence_error_cb, (void *)sde_hw_fence_error_cb_data);
	if (ret) {
		SDE_EVT32(hw_ctl->idx, SDE_EVTLOG_ERROR);
		SDE_DEBUG("hw fence cb register failed. ret = %d\n", ret);
	}

	/* one-to-one memory map of ctl-path client queues */
	ctl_hfi_iova = HW_FENCE_HFI_MMAP_DPU_BA +
		PAGE_ALIGN(hwfence_data->mem_descriptor.size * ctl_id);
	ret = mmu->funcs->one_to_one_map(mmu, ctl_hfi_iova,
		hwfence_data->mem_descriptor.device_addr,
		hwfence_data->mem_descriptor.size, IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		SDE_ERROR("queue one2one memory smmu map failed, ret:%d ctl_id:%d, client:%d\n",
			ret, ctl_id, hwfence_data->hw_fence_client_id);
		return ret;
	}

	/* get queue header offset */
	queue_va = hwfence_data->mem_descriptor.virtual_addr;
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)queue_va;
	qhdr0_offset = hfi_table_header->qhdr0_offset;

	/* initialize tx_wm pointer */
	hfi_queue_header_va = (struct msm_hw_fence_hfi_queue_header *)(queue_va + qhdr0_offset);
	hwfence_data->txq_tx_wm_va = &hfi_queue_header_va->tx_wm;

	/* initialize txq wr_ptr addr pointer */
	queue_pa = ctl_hfi_iova;
	hfi_queue_header_pa = (struct msm_hw_fence_hfi_queue_header *)(queue_pa + qhdr0_offset);
	hwfence_data->txq_wr_ptr_pa = &hfi_queue_header_pa->write_index;

	SDE_DEBUG("hwfence registered ctl:%d client:%d handle:0x%pK tx_wm:0x%x wr_idx:0x%x\n",
		ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle,
		*hwfence_data->txq_tx_wm_va, *hwfence_data->txq_wr_ptr_pa);

	return 0;
}
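
/*
 * sde_hw_fence_deinit - deregisters the ctl from the hw-fence driver if it
 * was previously registered, and clears its handle.
 */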
void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
{
	struct sde_hw_fence_data *hwfence_data;

	if (!hw_ctl)
		return;

	hwfence_data = &hw_ctl->hwfence_data;

	/* client was not registered */
	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
		return;

	SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
		hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);

	msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
	hwfence_data->hw_fence_handle = NULL;
}
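
/*
 * sde_fence_create_hw_fence - backs an sde output fence with a hw-fence
 * created through the hw-fence driver; on success, the ctl and hw-fence
 * index are stored in the sde fence.
 */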
static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
{
	struct sde_hw_fence_data *data;
	struct msm_hw_fence_create_params params;
	int ctl_id;
	u64 hwfence_index;
	int ret;

	if (!hw_ctl)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	params.fence = &sde_fence->base;
	params.handle = &hwfence_index;

	/* Create the HW fence */
	ret = msm_hw_fence_create(data->hw_fence_handle, &params);
	if (ret) {
		SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
			sde_fence->base.context, sde_fence->base.seqno);
	} else {
		/* store ctl and index for this fence */
		sde_fence->hwfence_out_ctl = hw_ctl;
		sde_fence->hwfence_index = hwfence_index;

		SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
			sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
			sde_fence->base.seqno, sde_fence->name);
	}

	return ret;
}

static inline char *_get_client_id_name(int hw_fence_client_id)
{
	switch (hw_fence_client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		return "HW_FENCE_CLIENT_ID_CTX0";
	case HW_FENCE_CLIENT_ID_CTL0:
		return "HW_FENCE_CLIENT_ID_CTL0";
	case HW_FENCE_CLIENT_ID_CTL1:
		return "HW_FENCE_CLIENT_ID_CTL1";
	case HW_FENCE_CLIENT_ID_CTL2:
		return "HW_FENCE_CLIENT_ID_CTL2";
	case HW_FENCE_CLIENT_ID_CTL3:
		return "HW_FENCE_CLIENT_ID_CTL3";
	case HW_FENCE_CLIENT_ID_CTL4:
		return "HW_FENCE_CLIENT_ID_CTL4";
	case HW_FENCE_CLIENT_ID_CTL5:
		return "HW_FENCE_CLIENT_ID_CTL5";
	default:
		return "Unknown";
	}
}

static void _cleanup_fences_refcount(struct dma_fence **fences, u32 num_fences)
{
	int i;

	for (i = 0; i < num_fences; i++)
		dma_fence_put(fences[i]);
}
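
/*
 * sde_fence_register_hw_fences_wait - registers the given input fences with
 * the fence controller so the ctl can wait on them in hw. Multiple fences
 * (and the children of any fence-arrays) are first flattened into a single
 * dma_fence_array; refcounts taken here are dropped once registration is
 * done, or later by the fence-array release.
 */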
int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
		u32 num_fences)
{
	struct sde_hw_fence_data *data;
	int i, j, ret;
	int ctl_id;
	struct dma_fence_array *temp_array = NULL;
	struct dma_fence *base_fence;
	struct dma_fence **hw_fences;
	u32 num_hw_fences;
	struct dma_fence **fence_list;
	struct dma_fence_array *array = NULL;
	int array_childs = 0;
	int array_count = 0;
	int fence_list_index = 0;
	u64 seqno;

	if (!hw_ctl) {
		SDE_ERROR("wrong ctl\n");
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
		num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));
	for (i = 0; i < num_fences; i++) {
		/* get a refcount for each of the fences */
		dma_fence_get(fences[i]);

		if (dma_fence_is_array(fences[i])) {
			array_count++;
			array = container_of(fences[i], struct dma_fence_array, base);
			array_childs += array->num_fences;
		}

		SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
			(fences[i])->context, (fences[i])->seqno);
	}

	if (num_fences > 1) {
		/* fence_list memory is freed during fence-array release */
		fence_list = kcalloc((num_fences - array_count) + array_childs,
			sizeof(struct dma_fence *), GFP_KERNEL);
		if (!fence_list) {
			_cleanup_fences_refcount(fences, num_fences);
			return -EINVAL;
		}

		/* populate fence_list with the fences */
		for (i = 0; i < num_fences; i++) {
			if (dma_fence_is_array(fences[i])) {
				array = container_of(fences[i], struct dma_fence_array, base);
				for (j = 0; j < array->num_fences; j++) {
					/* get a refcount for each of the child fences */
					dma_fence_get(array->fences[j]);
					fence_list[fence_list_index++] = array->fences[j];
				}
				if (array->num_fences) /* print the first fence from array */
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_H32(array->fences[0]->context),
						SDE_EVTLOG_L32(array->fences[0]->context),
						SDE_EVTLOG_H32(array->fences[0]->seqno),
						SDE_EVTLOG_L32(array->fences[0]->seqno));
				else
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_ERROR);

				/* remove refcount on parent */
				dma_fence_put(fences[i]);
			} else {
				fence_list[fence_list_index++] = fences[i];
				SDE_EVT32(ctl_id, num_fences, i, SDE_EVTLOG_H32(fences[i]->context),
					SDE_EVTLOG_L32(fences[i]->context),
					SDE_EVTLOG_H32(fences[i]->seqno),
					SDE_EVTLOG_L32(fences[i]->seqno));
			}
		}

		seqno = data->hw_fence_array_seqno++;
		temp_array = dma_fence_array_create(fence_list_index, fence_list,
			data->dma_context, seqno, 0);
		if (!temp_array) {
			SDE_ERROR("unable to create fence array, can't register for wait\n");
			_cleanup_fences_refcount(fences, num_fences);
			kfree(fence_list);
			return -EINVAL;
		}

		SDE_EVT32(ctl_id, fence_list_index, SDE_EVTLOG_H32(data->dma_context),
			SDE_EVTLOG_L32(data->dma_context), SDE_EVTLOG_H32(seqno),
			SDE_EVTLOG_L32(seqno));

		base_fence = &temp_array->base;
		hw_fences = &base_fence;
		num_hw_fences = 1;
	} else {
		struct dma_fence_array *tmp_array;

		hw_fences = fences;
		num_hw_fences = num_fences;
		tmp_array = dma_fence_is_array(fences[0]) ?
			container_of(fences[0], struct dma_fence_array, base) :
			NULL;
		SDE_EVT32(ctl_id, num_hw_fences, SDE_EVTLOG_H32(fences[0]->context),
			SDE_EVTLOG_L32(fences[0]->context), SDE_EVTLOG_H32(fences[0]->seqno),
			SDE_EVTLOG_L32(fences[0]->seqno), fences[0]->flags,
			tmp_array ? tmp_array->num_fences : SDE_EVTLOG_FUNC_CASE2);
	}

	/* register for wait */
	ret = msm_hw_fence_wait_update(data->hw_fence_handle, hw_fences, num_hw_fences, true);
	if (ret)
		SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);

	/* fence-array put will release each individual extra refcount during array release */
	if (temp_array)
		dma_fence_put(&temp_array->base);
	else
		dma_fence_put(fences[0]);

	SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);

	return ret;
}
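
/*
 * _arm_output_hw_fence - programs the ctl with the next ping-pong ipcc output
 * signal and selects the trigger source (prog line-count, vid-mode, or
 * cmd-mode) the dpu will use to signal the output hw-fence.
 */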
static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, bool vid_mode, u32 line_count,
		u32 debugfs_hw_fence)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_out_signal;
	int ctl_id;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
			!hw_ctl->ops.hw_fence_update_output_fence) {
		SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
		/* This should not happen; review the ping-pong calculation */
		SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
			MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
		return -EINVAL;
	}

	ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
	data->ipcc_out_signal_pp_idx = (data->ipcc_out_signal_pp_idx + 1) %
		MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG;

	SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
		ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));

	if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
			hw_ctl->ops.hw_fence_output_timestamp_ctrl)
		hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);

	/* update client/signal output fence */
	hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
	SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);

	/* arm dpu to trigger output fence signal once ready */
	if (line_count)
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
			HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
	else if (vid_mode && (hw_ctl->caps->features & BIT(SDE_CTL_HW_FENCE_TRIGGER_SEL)))
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_VID_MODE);
	else
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CMD_MODE);

	return 0;
}
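
/*
 * _sde_fence_arm_output_hw_fence - walks the context's fence list looking for
 * output fences flagged as hw-fences, validates that each one has an hw ctl,
 * and arms the ctl of the last one found to trigger the ipcc signal.
 */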
static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, bool vid_mode,
		u32 line_count, u32 debugfs_hw_fence)
{
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_fence *fc, *next;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available; this should not happen, but if it does,
			 * it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
				fence->context, fence->seqno);
			spin_unlock(&ctx->list_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
	if (hw_ctl)
		_arm_output_hw_fence(hw_ctl, vid_mode, line_count, debugfs_hw_fence);

	return 0;
}

void sde_fence_output_hw_fence_dir_write_init(struct sde_hw_ctl *hw_ctl)
{
	if (hw_ctl && hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		hw_ctl->ops.hw_fence_output_fence_dir_write_init(hw_ctl,
			hw_ctl->hwfence_data.txq_wr_ptr_pa, HW_FENCE_DIR_WRITE_SIZE,
			HW_FENCE_DIR_WRITE_MASK);
}

/* update output hw_fences txq */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
		u32 debugfs_hw_fence)
{
	int ret = 0;
	struct sde_hw_fence_data *data;
	struct sde_fence *fc, *next;
	struct sde_hw_ctl *hw_ctl = NULL;
	int ctl_id;
	bool txq_updated = false;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
				fc->txq_updated_fence)
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as hw-fence but the hw ctl to handle
			 * it is not available; this should not happen, but if it does,
			 * it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
				fence->seqno, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		ctl_id = hw_ctl->idx - CTL_0;
		data = &hw_ctl->hwfence_data;
		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
			SDE_ERROR("unexpected handle for ctl_id:%d, this can fence-timeout\n",
				ctl_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		/* update hw-fence tx queue */
		SDE_EVT32(ctl_id, SDE_EVTLOG_H32(fc->hwfence_index),
			SDE_EVTLOG_L32(fc->hwfence_index), *data->txq_tx_wm_va);
		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
		if (ret) {
			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
				fc->hwfence_index, fence->context, fence->seqno,
				data->hw_fence_client_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			goto exit;
		}

		/* update hw-fence tx queue wr_idx data */
		if (hw_ctl->ops.hw_fence_output_fence_dir_write_data)
			hw_ctl->ops.hw_fence_output_fence_dir_write_data(hw_ctl,
				*data->txq_tx_wm_va);

		/* avoid updating txq more than once and avoid repeating the same fence twice */
		txq_updated = fc->txq_updated_fence = true;

		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%lx ctl:%d vid:%d\n",
			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);

		/* We will update TxQ one time per frame */
		if (txq_updated)
			break;
	}

exit:
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
	if ((txq_updated && hw_ctl) || line_count)
		_sde_fence_arm_output_hw_fence(ctx, vid_mode, line_count, debugfs_hw_fence);

	return ret;
}
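
/*
 * _sde_hw_fence_release - destroys the hw-fence backing an sde fence; called
 * from the dma-fence release path for fences flagged as hw-fences.
 */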
static void _sde_hw_fence_release(struct sde_fence *f)
{
	struct sde_hw_fence_data *data;
	struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
	int ctl_id;
	int ret;

	if (!hw_ctl) {
		SDE_ERROR("invalid hw_ctl\n");
		return;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return;
	}

	SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
		ctl_id, f->base.context, f->base.seqno, f->name);

	/* Delete the HW fence */
	ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
	if (ret)
		SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
			f->base.context, f->base.seqno);
}
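
/*
 * _reset_hw_fence_timeline - asks the hw-fence driver to reset this client's
 * timeline with the given flags (e.g. reset without error and without
 * destroying the fences).
 */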
static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
{
	struct sde_hw_fence_data *data;
	int ret = 0;

	data = &hw_ctl->hwfence_data;

	if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_EVT32(data->hw_fence_client_id);
		ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
		if (ret) {
			pr_err("failed to reset client %d\n", data->hw_fence_client_id);
			return -EINVAL;
		}
	}

	return ret;
}
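
/*
 * sde_fence_update_input_hw_fence_signal - programs the dpu with the ipcc
 * client/signal pair used to signal input hw-fences and enables hw-fencing
 * for this ctl path; when 'disable' is set, hw-fencing is turned off instead.
 */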
int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
		struct sde_hw_mdp *hw_mdp, bool disable)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_signal_id;
	u32 ipcc_client_id;
	int ctl_id;
	u64 qtime;

	/* we must support sw_override as well, so check both functions */
	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
			!hw_ctl->ops.hw_fence_trigger_sw_override) {
		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;

	if (disable) {
		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
		return -EPERM;
	}

	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);

	ipcc_signal_id = data->ipcc_in_signal;
	ipcc_client_id = data->ipcc_in_client;

	SDE_DEBUG("configure input signal:%d in client:%d ctl_id:%d\n", ipcc_signal_id,
		ipcc_client_id, ctl_id);

	/* configure dpu hw for the client/signal pair signaling input-fence */
	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);

	/* enable hw-fence for this ctl-path */
	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);

	qtime = arch_timer_read_counter();
	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id, SDE_EVTLOG_H32(qtime),
		SDE_EVTLOG_L32(qtime));

	return 0;
}

void *sde_sync_get(uint64_t fd)
{
	/* force signed compare, fdget accepts an int argument */
	return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
}

void sde_sync_put(void *fence)
{
	if (fence)
		dma_fence_put(fence);
}

void sde_fence_dump(struct dma_fence *fence)
{
	char timeline_str[TIMELINE_VAL_LENGTH] = {0};

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);

	SDE_ERROR(
		"fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%lx\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff,
		dma_fence_get_status(fence), fence->flags);
}

static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
{
	struct dma_fence_array *array;
	struct dma_fence *user_fence;
	int i;

	array = container_of(base_fence, struct dma_fence_array, base);
	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
			test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
		for (i = 0; i < array->num_fences; i++) {
			user_fence = array->fences[i];
			if (user_fence) {
				dma_fence_get(user_fence);
				sde_fence_dump(user_fence);
				dma_fence_put(user_fence);
			}
		}
	}
}
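
/*
 * sde_sync_wait - waits on the fence with the given timeout; on timeout or
 * error it dumps the fence state, with special handling for speculative
 * fence-arrays whose bind failed.
 */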
signed long sde_sync_wait(void *fnc, long timeout_ms)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
		msecs_to_jiffies(timeout_ms));
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			if (status == -EINVAL) {
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	return rc;
}

uint32_t sde_sync_get_name_prefix(void *fence)
{
	const char *name;
	uint32_t i, prefix;
	struct dma_fence *f = fence;

	if (!fence)
		return 0;

	name = f->ops->get_driver_name(f);
	if (!name)
		return 0;

	prefix = 0x0;
	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
		prefix = (prefix << CHAR_BIT) | name[i];

	return prefix;
}

static void sde_fence_destroy(struct kref *kref)
{
	struct sde_fence_context *ctx;

	if (!kref) {
		SDE_ERROR("received invalid kref\n");
		return;
	}

	ctx = container_of(kref, struct sde_fence_context, kref);
	kfree(ctx);
}

static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}

static const char *sde_fence_get_driver_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->name;
}

static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->ctx->name;
}

static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool sde_fence_signaled(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);
	bool status;

	status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
	SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
		status, fence->seqno, f->ctx->done_count);

	return status;
}

static void sde_fence_release(struct dma_fence *fence)
{
	struct sde_fence *f;

	if (fence) {
		f = to_sde_fence(fence);

		/* Delete the HW fence */
		if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			_sde_hw_fence_release(f);

		kref_put(&f->ctx->kref, sde_fence_destroy);
		kfree(f);
	}
}

static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
{
	if (!fence || !str)
		return;

	snprintf(str, size, "%llu", fence->seqno);
}

static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
		int size)
{
	struct sde_fence *f = to_sde_fence(fence);

	if (!fence || !f->ctx || !str)
		return;

	snprintf(str, size, "%d", f->ctx->done_count);
}

static struct dma_fence_ops sde_fence_ops = {
	.get_driver_name = sde_fence_get_driver_name,
	.get_timeline_name = sde_fence_get_timeline_name,
	.enable_signaling = sde_fence_enable_signaling,
	.signaled = sde_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = sde_fence_release,
	.fence_value_str = sde_fence_value_str,
	.timeline_value_str = sde_fence_timeline_value_str,
};

/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: Fence context (timeline) to associate the fence with
 * @val: Timeline value at which to signal the fence
 * @hw_ctl: If valid, hw ctl used to also back the fence with a hw-fence
 * Return: File descriptor on success, or error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	kref_get(&ctx->kref);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* If hw_ctl is valid, try to create a hw-fence */
	if (hw_ctl)
		sde_fence_create_hw_fence(hw_ctl, sde_fence);

	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}

struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
{
	struct sde_fence_context *ctx;

	if (!name) {
		SDE_ERROR("invalid argument(s)\n");
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDE_ERROR("failed to alloc fence ctx\n");
		return ERR_PTR(-ENOMEM);
	}

	strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
	ctx->drm_id = drm_id;
	kref_init(&ctx->kref);
	ctx->context = dma_fence_context_alloc(1);

	spin_lock_init(&ctx->lock);
	spin_lock_init(&ctx->list_lock);
	INIT_LIST_HEAD(&ctx->fence_list_head);

	return ctx;
}

void sde_fence_deinit(struct sde_fence_context *ctx)
{
	if (!ctx) {
		SDE_ERROR("invalid fence\n");
		return;
	}

	kref_put(&ctx->kref, sde_fence_destroy);
}

void sde_fence_prepare(struct sde_fence_context *ctx)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
	} else {
		spin_lock_irqsave(&ctx->lock, flags);
		++ctx->commit_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
}
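
/*
 * _sde_fence_trigger - signals (with an optional -EBUSY error) every fence on
 * the context's list whose seqno is at or below the timeline done_count, then
 * removes each signaled fence from the list and drops its refcount.
 */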
static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;

	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);
		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			list_del_init(&fc->fence_list);
			dma_fence_put(&fc->base);
		}
	}

end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}
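
/*
 * sde_fence_create - creates a release/retire fence that signals at
 * commit_count + offset on the context's timeline, optionally backed by a
 * hw-fence, and returns its fd through 'val'.
 */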
int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
		uint32_t offset, struct sde_hw_ctl *hw_ctl)
{
	uint32_t trigger_value;
	int fd, rc = -EINVAL;
	unsigned long flags;

	if (!ctx || !val) {
		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
			ctx != NULL, val != NULL);
		return rc;
	}

	/*
	 * Allow created fences to have a constant offset with respect
	 * to the timeline. This allows us to delay the fence signalling
	 * w.r.t. the commit completion (e.g., an offset of +1 would
	 * cause fences returned during a particular commit to signal
	 * after an additional delay of one commit, rather than at the
	 * end of the current one).
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	trigger_value = ctx->commit_count + offset;
	spin_unlock_irqrestore(&ctx->lock, flags);

	fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
	*val = fd;
	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
		fd, trigger_value, ctx->commit_count, offset);
	SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);

	rc = (fd >= 0) ? 0 : fd;

	return rc;
}
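
/*
 * sde_fence_signal - advances (or resets) the timeline done_count and
 * triggers any fences that became signalable as a result.
 */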
void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* reset hw-fences without error */
		if (hw_ctl)
			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);

		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
				ctx->drm_id, ctx->done_count, ctx->commit_count);
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}

void sde_fence_timeline_status(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj)
{
	char *obj_name;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);
}

void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
{
	char timeline_str[TIMELINE_VAL_LENGTH] = {0};

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence,
			timeline_str, TIMELINE_VAL_LENGTH);

	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff);
}

void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj, struct seq_file **s)
{
	char *obj_name;
	struct sde_fence *fc, *next;
	struct dma_fence *fence;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);

	spin_lock(&ctx->list_lock);
	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		fence = &fc->base;
		sde_fence_list_dump(fence, s);
	}
	spin_unlock(&ctx->list_lock);
}