sde_fence.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sync_file.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>

#include "msm_drv.h"
#include "sde_kms.h"
#include "sde_fence.h"

#define TIMELINE_VAL_LENGTH		128
#define SPEC_FENCE_FLAG_FENCE_ARRAY	0x10
#define SPEC_FENCE_FLAG_ARRAY_BIND	0x11

#define HW_FENCE_DIR_WRITE_SIZE		0x2
#define HW_FENCE_DIR_WRITE_MASK		0xFFFFFFFF
#define HW_FENCE_HFI_MMAP_DPU_BA	0x200000
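
/*
 * Note on the constants above, as used later in this file:
 * HW_FENCE_HFI_MMAP_DPU_BA is the base device-virtual address at which each
 * CTL's HFI queue memory gets one-to-one mapped in sde_hw_fence_init(), and
 * the HW_FENCE_DIR_WRITE_* values parameterize the direct-write setup passed
 * to the hw_fence_output_fence_dir_write_init() ctl op.
 */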

/**
 * struct sde_fence - release/retire fence structure
 * @base: base fence structure
 * @ctx: fence context
 * @name: name of each fence; composed of the fence timeline name plus the
 *        commit_count
 * @fence_list: list to associate this fence on timeline/context
 * @fd: fd attached to this fence - for debugging purposes
 * @hwfence_out_ctl: hw ctl for the output fence
 * @hwfence_index: hw fence index for this fence
 * @txq_updated_fence: flag to indicate that a fence has been updated in txq
 */
struct sde_fence {
	struct dma_fence base;
	struct sde_fence_context *ctx;
	char name[SDE_FENCE_NAME_SIZE];
	struct list_head fence_list;
	int fd;
	struct sde_hw_ctl *hwfence_out_ctl;
	u64 hwfence_index;
	bool txq_updated_fence;
};

/**
 * enum sde_hw_fence_clients - sde clients for the hw-fence feature
 *
 * Do not modify the order of this enum and/or add more elements
 * without modifying/adding fields in the 'hw_fence_data' structs.
 */
enum sde_hw_fence_clients {
	SDE_HW_FENCE_CLIENT_CTL_0,
	SDE_HW_FENCE_CLIENT_CTL_1,
	SDE_HW_FENCE_CLIENT_CTL_2,
	SDE_HW_FENCE_CLIENT_CTL_3,
	SDE_HW_FENCE_CLIENT_CTL_4,
	SDE_HW_FENCE_CLIENT_CTL_5,
	SDE_HW_FENCE_CLIENT_MAX,
};

/**
 * hw_fence_data_no_dpu - this table maps the dpu ipcc input and output signals
 * for each display client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum, and the
 * output signal must match the signals that the FenceCTL expects for each
 * display client.
 * This 'hw_fence_data_no_dpu' table must be used for HW that does not support
 * dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_no_dpu[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 14, {2, 3},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 15, {4, 5},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 16, {6, 7},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 17, {8, 9},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 18, {10, 11},
		0, 8, 8, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 19, {12, 13},
		0, 8, 8, 0, 0}
};

/**
 * hw_fence_data_dpu_client - this table maps the dpu ipcc input and output
 * signals for each display client to communicate with the fence controller.
 * This table must match the order of the 'sde_hw_fence_clients' enum, and the
 * output signal must match the signals that the FenceCTL expects for each
 * display client.
 * This 'hw_fence_data_dpu_client' table must be used for HW that supports
 * dpu-signal.
 */
struct sde_hw_fence_data hw_fence_data_dpu_client[SDE_HW_FENCE_CLIENT_MAX] = {
	{SDE_HW_FENCE_CLIENT_CTL_0, HW_FENCE_CLIENT_ID_CTL0, NULL, {0}, NULL, NULL, 8, 0, {0, 6},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_1, HW_FENCE_CLIENT_ID_CTL1, NULL, {0}, NULL, NULL, 8, 1, {1, 7},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_2, HW_FENCE_CLIENT_ID_CTL2, NULL, {0}, NULL, NULL, 8, 2, {2, 8},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_3, HW_FENCE_CLIENT_ID_CTL3, NULL, {0}, NULL, NULL, 8, 3, {3, 9},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_4, HW_FENCE_CLIENT_ID_CTL4, NULL, {0}, NULL, NULL, 8, 4, {4, 10},
		0, 8, 25, 0, 0},
	{SDE_HW_FENCE_CLIENT_CTL_5, HW_FENCE_CLIENT_ID_CTL5, NULL, {0}, NULL, NULL, 8, 5, {5, 11},
		0, 8, 25, 0, 0}
};

int sde_hw_fence_init(struct sde_hw_ctl *hw_ctl, bool use_dpu_ipcc, struct msm_mmu *mmu)
{
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header_va, *hfi_queue_header_pa;
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct sde_hw_fence_data *sde_hw_fence_data;
	struct sde_hw_fence_data *hwfence_data;
	phys_addr_t queue_pa;
	void *queue_va;
	u32 qhdr0_offset, ctl_hfi_iova;
	int ctl_id, ret;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	if (ctl_id >= SDE_HW_FENCE_CLIENT_MAX || ctl_id < 0) {
		SDE_ERROR("unexpected ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	hwfence_data = &hw_ctl->hwfence_data;
	sde_hw_fence_data = use_dpu_ipcc ? hw_fence_data_dpu_client : hw_fence_data_no_dpu;

	if (sde_hw_fence_data[ctl_id].client_id != ctl_id) {
		SDE_ERROR("Unexpected client_id:%d for ctl_id:%d\n",
			sde_hw_fence_data[ctl_id].client_id, ctl_id);
		return -EINVAL;
	}

	/* init the default fence-data for this client */
	memcpy(hwfence_data, &sde_hw_fence_data[ctl_id], sizeof(struct sde_hw_fence_data));

	SDE_DEBUG("hwfence register ctl:%d client:%d\n", ctl_id, hwfence_data->hw_fence_client_id);
	hwfence_data->hw_fence_handle = msm_hw_fence_register(hwfence_data->hw_fence_client_id,
		&hwfence_data->mem_descriptor);

	hwfence_data->dma_context = dma_fence_context_alloc(1);

	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle)) {
		hwfence_data->hw_fence_handle = NULL;
		SDE_DEBUG("error cannot register ctl_id:%d hw-fence client:%d\n", ctl_id,
			hwfence_data->hw_fence_client_id);
		return -EINVAL;
	}

	/* one-to-one memory map of ctl-path client queues */
	ctl_hfi_iova = HW_FENCE_HFI_MMAP_DPU_BA +
		PAGE_ALIGN(hwfence_data->mem_descriptor.size * ctl_id);
	ret = mmu->funcs->one_to_one_map(mmu, ctl_hfi_iova,
		hwfence_data->mem_descriptor.device_addr,
		hwfence_data->mem_descriptor.size, IOMMU_READ | IOMMU_WRITE);
	if (ret) {
		SDE_ERROR("queue one2one memory smmu map failed, ret:%d ctl_id:%d, client:%d\n",
			ret, ctl_id, hwfence_data->hw_fence_client_id);
		return ret;
	}

	/* get queue header offset */
	queue_va = hwfence_data->mem_descriptor.virtual_addr;
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)queue_va;
	qhdr0_offset = hfi_table_header->qhdr0_offset;

	/* initialize tx_wm pointer */
	hfi_queue_header_va = (struct msm_hw_fence_hfi_queue_header *)(queue_va + qhdr0_offset);
	hwfence_data->txq_tx_wm_va = &hfi_queue_header_va->tx_wm;

	/* initialize txq wr_ptr addr pointer */
	queue_pa = ctl_hfi_iova;
	hfi_queue_header_pa = (struct msm_hw_fence_hfi_queue_header *)(queue_pa + qhdr0_offset);
	hwfence_data->txq_wr_ptr_pa = &hfi_queue_header_pa->write_index;

	SDE_DEBUG("hwfence registered ctl:%d client:%d handle:0x%pK tx_wm:0x%x wr_idx:0x%x\n",
		ctl_id, hwfence_data->hw_fence_client_id, hwfence_data->hw_fence_handle,
		*hwfence_data->txq_tx_wm_va, *hwfence_data->txq_wr_ptr_pa);

	return 0;
}
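
/*
 * Layout sketch (illustrative; the 0x1000 queue size is an assumed example
 * value, not taken from this driver): with HW_FENCE_HFI_MMAP_DPU_BA at
 * 0x200000 and a page-multiple per-client queue size of 0x1000, the
 * one-to-one mapping above would place CTL_0's queues at iova 0x200000,
 * CTL_1's at 0x201000, and so on. The actual per-CTL offset is
 * PAGE_ALIGN(mem_descriptor.size * ctl_id), exactly as computed in
 * sde_hw_fence_init().
 */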

void sde_hw_fence_deinit(struct sde_hw_ctl *hw_ctl)
{
	struct sde_hw_fence_data *hwfence_data;

	if (!hw_ctl)
		return;

	hwfence_data = &hw_ctl->hwfence_data;

	/* client was not registered */
	if (IS_ERR_OR_NULL(hwfence_data->hw_fence_handle))
		return;

	SDE_DEBUG("hwfence deregister ctl_id:%d hw_fence_client_id:%d\n",
		hw_ctl->idx - CTL_0, hwfence_data->hw_fence_client_id);

	msm_hw_fence_deregister(hwfence_data->hw_fence_handle);
	hwfence_data->hw_fence_handle = NULL;
}

static int sde_fence_create_hw_fence(struct sde_hw_ctl *hw_ctl, struct sde_fence *sde_fence)
{
	struct sde_hw_fence_data *data;
	struct msm_hw_fence_create_params params;
	int ctl_id;
	u64 hwfence_index;
	int ret;

	if (!hw_ctl)
		return -EINVAL;

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	params.fence = &sde_fence->base;
	params.handle = &hwfence_index;

	/* Create the HW fence */
	ret = msm_hw_fence_create(data->hw_fence_handle, &params);
	if (ret) {
		SDE_ERROR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n", ctl_id,
			sde_fence->base.context, sde_fence->base.seqno);
	} else {
		/* store ctl and index for this fence */
		sde_fence->hwfence_out_ctl = hw_ctl;
		sde_fence->hwfence_index = hwfence_index;

		SDE_DEBUG("create hfence index:%llu ctl:%d ctx:%llu seqno:%llu name:%s\n",
			sde_fence->hwfence_index, ctl_id, sde_fence->base.context,
			sde_fence->base.seqno, sde_fence->name);
	}

	return ret;
}

static inline char *_get_client_id_name(int hw_fence_client_id)
{
	switch (hw_fence_client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		return "HW_FENCE_CLIENT_ID_CTX0";
	case HW_FENCE_CLIENT_ID_CTL0:
		return "HW_FENCE_CLIENT_ID_CTL0";
	case HW_FENCE_CLIENT_ID_CTL1:
		return "HW_FENCE_CLIENT_ID_CTL1";
	case HW_FENCE_CLIENT_ID_CTL2:
		return "HW_FENCE_CLIENT_ID_CTL2";
	case HW_FENCE_CLIENT_ID_CTL3:
		return "HW_FENCE_CLIENT_ID_CTL3";
	case HW_FENCE_CLIENT_ID_CTL4:
		return "HW_FENCE_CLIENT_ID_CTL4";
	case HW_FENCE_CLIENT_ID_CTL5:
		return "HW_FENCE_CLIENT_ID_CTL5";
	default:
		return "Unknown";
	}
}

static void _cleanup_fences_refcount(struct dma_fence **fences, u32 num_fences)
{
	int i;

	for (i = 0; i < num_fences; i++)
		dma_fence_put(fences[i]);
}

int sde_fence_register_hw_fences_wait(struct sde_hw_ctl *hw_ctl, struct dma_fence **fences,
	u32 num_fences)
{
	struct sde_hw_fence_data *data;
	int i, j, ret;
	int ctl_id;
	struct dma_fence_array *temp_array = NULL;
	struct dma_fence *base_fence;
	struct dma_fence **hw_fences;
	u32 num_hw_fences;
	struct dma_fence **fence_list;
	struct dma_fence_array *array = NULL;
	int array_childs = 0;
	int array_count = 0;
	int fence_list_index = 0;
	u64 seqno;

	if (!hw_ctl) {
		SDE_ERROR("wrong ctl\n");
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return -EINVAL;
	}

	SDE_DEBUG("register for wait fences:%d ctl_id:%d hw_fence_client:%s\n",
		num_fences, ctl_id, _get_client_id_name(data->hw_fence_client_id));

	for (i = 0; i < num_fences; i++) {
		/* get a refcount for each of the fences */
		dma_fence_get(fences[i]);

		if (dma_fence_is_array(fences[i])) {
			array_count++;
			array = container_of(fences[i], struct dma_fence_array, base);
			array_childs += array->num_fences;
		}

		SDE_DEBUG("registering fence: ctx:%llu seqno:%llu\n",
			(fences[i])->context, (fences[i])->seqno);
	}

	if (num_fences > 1) {
		/* fence_list memory is freed during fence-array release */
		fence_list = kcalloc((num_fences - array_count) + array_childs,
				sizeof(struct dma_fence *), GFP_KERNEL);
		if (!fence_list) {
			_cleanup_fences_refcount(fences, num_fences);
			return -ENOMEM;
		}

		/* populate fence_list with the fences */
		for (i = 0; i < num_fences; i++) {
			if (dma_fence_is_array(fences[i])) {
				array = container_of(fences[i], struct dma_fence_array, base);
				for (j = 0; j < array->num_fences; j++) {
					/* get a refcount for each of the child fences */
					dma_fence_get(array->fences[j]);
					fence_list[fence_list_index++] = array->fences[j];
				}

				if (array->num_fences) /* print the first fence from array */
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_H32(array->fences[0]->context),
						SDE_EVTLOG_L32(array->fences[0]->context),
						SDE_EVTLOG_H32(array->fences[0]->seqno),
						SDE_EVTLOG_L32(array->fences[0]->seqno));
				else
					SDE_EVT32(ctl_id, num_fences, array->num_fences, i,
						SDE_EVTLOG_ERROR);

				/* remove refcount on parent */
				dma_fence_put(fences[i]);
			} else {
				fence_list[fence_list_index++] = fences[i];

				SDE_EVT32(ctl_id, num_fences, i, SDE_EVTLOG_H32(fences[i]->context),
					SDE_EVTLOG_L32(fences[i]->context),
					SDE_EVTLOG_H32(fences[i]->seqno),
					SDE_EVTLOG_L32(fences[i]->seqno));
			}
		}

		seqno = data->hw_fence_array_seqno++;
		temp_array = dma_fence_array_create(fence_list_index, fence_list,
				data->dma_context, seqno, 0);
		if (!temp_array) {
			SDE_ERROR("unable to create fence array, can't register for wait\n");
			_cleanup_fences_refcount(fences, num_fences);
			kfree(fence_list);
			return -EINVAL;
		}

		SDE_EVT32(ctl_id, fence_list_index, SDE_EVTLOG_H32(data->dma_context),
			SDE_EVTLOG_L32(data->dma_context), SDE_EVTLOG_H32(seqno),
			SDE_EVTLOG_L32(seqno));

		base_fence = &temp_array->base;
		hw_fences = &base_fence;
		num_hw_fences = 1;
	} else {
		struct dma_fence_array *tmp_array;

		hw_fences = fences;
		num_hw_fences = num_fences;
		tmp_array = dma_fence_is_array(fences[0]) ?
				container_of(fences[0], struct dma_fence_array, base) :
				NULL;
		SDE_EVT32(ctl_id, num_hw_fences, SDE_EVTLOG_H32(fences[0]->context),
			SDE_EVTLOG_L32(fences[0]->context), SDE_EVTLOG_H32(fences[0]->seqno),
			SDE_EVTLOG_L32(fences[0]->seqno), fences[0]->flags,
			tmp_array ? tmp_array->num_fences : SDE_EVTLOG_FUNC_CASE2);
	}

	/* register for wait */
	ret = msm_hw_fence_wait_update(data->hw_fence_handle, hw_fences, num_hw_fences, true);
	if (ret)
		SDE_ERROR("failed to register wait fences for ctl_id:%d ret:%d\n", ctl_id, ret);

	/* fence-array put will release each individual extra refcount during array release */
	if (temp_array)
		dma_fence_put(&temp_array->base);
	else
		dma_fence_put(fences[0]);

	SDE_EVT32_VERBOSE(ctl_id, num_fences, ret);

	return ret;
}
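
/*
 * Refcounting summary for sde_fence_register_hw_fences_wait(), derived from
 * the code above: every incoming fence takes one extra reference up front. In
 * the multi-fence path, each child of a dma_fence_array gets its own extra
 * reference and the parent's extra reference is dropped immediately, since
 * dma_fence_array_create() takes ownership of fence_list and releases the
 * per-child references when the temporary array itself is released. In the
 * single-fence path, the one extra reference is dropped directly at the end.
 */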

static int _arm_output_hw_fence(struct sde_hw_ctl *hw_ctl, bool vid_mode, u32 line_count,
	u32 debugfs_hw_fence)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_out_signal;
	int ctl_id;

	if (!hw_ctl || !hw_ctl->ops.hw_fence_trigger_output_fence ||
			!hw_ctl->ops.hw_fence_update_output_fence) {
		SDE_ERROR("missing ctl/trigger or update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (data->ipcc_out_signal_pp_idx >= MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG) {
		/* this should not happen; review the ping-pong index calculation */
		SDE_ERROR("Wrong pp_idx:%d, max:%d\n", data->ipcc_out_signal_pp_idx,
			MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG);
		return -EINVAL;
	}

	ipcc_out_signal = data->ipcc_out_signal_pp[data->ipcc_out_signal_pp_idx];
	/* advance the ping-pong index without the unsequenced pre-increment */
	data->ipcc_out_signal_pp_idx = (data->ipcc_out_signal_pp_idx + 1) %
		MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG;

	SDE_DEBUG("out-fence ctl_id:%d out_signal:%d hw_fence_client:%s\n",
		ctl_id, ipcc_out_signal, _get_client_id_name(data->hw_fence_client_id));

	if ((debugfs_hw_fence & SDE_OUTPUT_HW_FENCE_TIMESTAMP) &&
			hw_ctl->ops.hw_fence_output_timestamp_ctrl)
		hw_ctl->ops.hw_fence_output_timestamp_ctrl(hw_ctl, true, false);

	/* update client/signal output fence */
	hw_ctl->ops.hw_fence_update_output_fence(hw_ctl, data->ipcc_out_client, ipcc_out_signal);
	SDE_EVT32_VERBOSE(ctl_id, ipcc_out_signal);

	/* arm dpu to trigger output fence signal once ready */
	if (line_count)
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl,
			HW_FENCE_TRIGGER_SEL_PROG_LINE_COUNT);
	else if (vid_mode && (hw_ctl->caps->features & BIT(SDE_CTL_HW_FENCE_TRIGGER_SEL)))
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_VID_MODE);
	else
		hw_ctl->ops.hw_fence_trigger_output_fence(hw_ctl, HW_FENCE_TRIGGER_SEL_CMD_MODE);

	return 0;
}
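
/*
 * Ping-pong sketch (illustrative; assumes MAX_SDE_HFENCE_OUT_SIGNAL_PING_PONG
 * is 2 and that the {0, 6} pair in the dpu-client table above is CTL_0's
 * ipcc_out_signal_pp): successive _arm_output_hw_fence() calls would then
 * select output signal 0, 6, 0, 6, ... so each new frame can be armed on the
 * alternate signal while the previous one is still in flight.
 */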

static int _sde_fence_arm_output_hw_fence(struct sde_fence_context *ctx, bool vid_mode,
	u32 line_count, u32 debugfs_hw_fence)
{
	struct sde_hw_ctl *hw_ctl = NULL;
	struct sde_fence *fc, *next;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as a hw-fence, but the hw ctl
			 * to handle it is not available; this should not have happened,
			 * but if it does, it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_ERROR, SDE_EVTLOG_FUNC_CASE1, fence->flags,
				fence->context, fence->seqno);
			spin_unlock(&ctx->list_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion */
	if (hw_ctl)
		_arm_output_hw_fence(hw_ctl, vid_mode, line_count, debugfs_hw_fence);

	return 0;
}

void sde_fence_output_hw_fence_dir_write_init(struct sde_hw_ctl *hw_ctl)
{
	if (hw_ctl && hw_ctl->ops.hw_fence_output_fence_dir_write_init)
		hw_ctl->ops.hw_fence_output_fence_dir_write_init(hw_ctl,
			hw_ctl->hwfence_data.txq_wr_ptr_pa, HW_FENCE_DIR_WRITE_SIZE,
			HW_FENCE_DIR_WRITE_MASK);
}

/* update output hw_fences txq */
int sde_fence_update_hw_fences_txq(struct sde_fence_context *ctx, bool vid_mode, u32 line_count,
	u32 debugfs_hw_fence)
{
	int ret = 0;
	struct sde_hw_fence_data *data;
	struct sde_fence *fc, *next;
	struct sde_hw_ctl *hw_ctl = NULL;
	int ctl_id;
	bool txq_updated = false;

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		spin_unlock(&ctx->list_lock);
		return 0;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		struct dma_fence *fence = &fc->base;

		/* this is not a hw-fence, or it was already processed */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags) ||
				fc->txq_updated_fence)
			continue;

		hw_ctl = fc->hwfence_out_ctl;
		if (!hw_ctl) {
			/*
			 * We flagged an output dma-fence as a hw-fence, but the hw ctl
			 * to handle it is not available; this should not have happened,
			 * but if it does, it can translate to a fence-timeout!
			 */
			SDE_ERROR("invalid hw ctl, this can cause a fence-timeout!\n");
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE1, fence->flags, fence->context,
				fence->seqno, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		ctl_id = hw_ctl->idx - CTL_0;
		data = &hw_ctl->hwfence_data;
		if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
			SDE_ERROR("unexpected handle for ctl_id:%d, this can cause a fence-timeout\n",
				ctl_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE2, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			ret = -EINVAL;
			goto exit;
		}

		/* update hw-fence tx queue */
		SDE_EVT32(ctl_id, SDE_EVTLOG_H32(fc->hwfence_index),
			SDE_EVTLOG_L32(fc->hwfence_index), *data->txq_tx_wm_va);
		ret = msm_hw_fence_update_txq(data->hw_fence_handle, fc->hwfence_index, 0, 0);
		if (ret) {
			SDE_ERROR("fail txq update index:%llu fctx:%llu seqno:%llu client:%d\n",
				fc->hwfence_index, fence->context, fence->seqno,
				data->hw_fence_client_id);
			SDE_EVT32(SDE_EVTLOG_FUNC_CASE3, fence->flags, fence->context,
				fence->seqno, ctl_id, SDE_EVTLOG_ERROR);
			goto exit;
		}

		/* update hw-fence tx queue wr_idx data */
		if (hw_ctl->ops.hw_fence_output_fence_dir_write_data)
			hw_ctl->ops.hw_fence_output_fence_dir_write_data(hw_ctl,
				*data->txq_tx_wm_va);

		/* avoid updating txq more than once and avoid repeating the same fence twice */
		txq_updated = fc->txq_updated_fence = true;

		SDE_DEBUG("update txq fence:0x%pK ctx:%llu seqno:%llu f:0x%llx ctl:%d vid:%d\n",
			fence, fence->context, fence->seqno, fence->flags, ctl_id, vid_mode);

		/* the TxQ is only updated one time per frame */
		if (txq_updated)
			break;
	}

exit:
	spin_unlock(&ctx->list_lock);

	/* arm dpu to trigger output hw-fence ipcc signal upon completion in vid-mode */
	if ((txq_updated && hw_ctl) || line_count)
		_sde_fence_arm_output_hw_fence(ctx, vid_mode, line_count, debugfs_hw_fence);

	return ret;
}

static void _sde_hw_fence_release(struct sde_fence *f)
{
	struct sde_hw_fence_data *data;
	struct sde_hw_ctl *hw_ctl = f->hwfence_out_ctl;
	int ctl_id;
	int ret;

	if (!hw_ctl) {
		SDE_ERROR("invalid hw_ctl\n");
		return;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;
	if (IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_ERROR("unexpected handle for ctl_id:%d\n", ctl_id);
		return;
	}

	SDE_DEBUG("destroy hw fence ctl_id:%d ctx:%llu seqno:%llu name:%s\n",
		ctl_id, f->base.context, f->base.seqno, f->name);

	/* Delete the HW fence */
	ret = msm_hw_fence_destroy(data->hw_fence_handle, &f->base);
	if (ret)
		SDE_ERROR("failed to destroy hw_fence for ctl_id:%d ctx:%llu seqno:%llu\n", ctl_id,
			f->base.context, f->base.seqno);
}

static int _reset_hw_fence_timeline(struct sde_hw_ctl *hw_ctl, u32 flags)
{
	struct sde_hw_fence_data *data;
	int ret = 0;

	data = &hw_ctl->hwfence_data;

	if (!IS_ERR_OR_NULL(data->hw_fence_handle)) {
		SDE_EVT32(data->hw_fence_client_id);
		ret = msm_hw_fence_reset_client(data->hw_fence_handle, flags);
		if (ret) {
			pr_err("failed to reset client %d\n", data->hw_fence_client_id);
			return -EINVAL;
		}
	}

	return ret;
}

int sde_fence_update_input_hw_fence_signal(struct sde_hw_ctl *hw_ctl, u32 debugfs_hw_fence,
	struct sde_hw_mdp *hw_mdp, bool disable)
{
	struct sde_hw_fence_data *data;
	u32 ipcc_signal_id;
	u32 ipcc_client_id;
	int ctl_id;
	u64 qtime;

	/* we must support sw_override as well, so check both functions */
	if (!hw_mdp || !hw_ctl || !hw_ctl->ops.hw_fence_update_input_fence ||
			!hw_ctl->ops.hw_fence_trigger_sw_override) {
		SDE_ERROR("missing ctl/override/update fence %d\n", !hw_ctl);
		return -EINVAL;
	}

	ctl_id = hw_ctl->idx - CTL_0;
	data = &hw_ctl->hwfence_data;

	if (disable) {
		hw_ctl->ops.hw_fence_ctrl(hw_ctl, false, false, 0);
		return -EPERM;
	}

	if ((debugfs_hw_fence & SDE_INPUT_HW_FENCE_TIMESTAMP)
			&& hw_mdp->ops.hw_fence_input_timestamp_ctrl)
		hw_mdp->ops.hw_fence_input_timestamp_ctrl(hw_mdp, true, false);

	ipcc_signal_id = data->ipcc_in_signal;
	ipcc_client_id = data->ipcc_in_client;

	SDE_DEBUG("configure input signal:%d out client:%d ctl_id:%d\n", ipcc_signal_id,
		ipcc_client_id, ctl_id);

	/* configure dpu hw for the client/signal pair signaling input-fence */
	hw_ctl->ops.hw_fence_update_input_fence(hw_ctl, ipcc_client_id, ipcc_signal_id);

	/* Enable hw-fence for this ctrl-path */
	hw_ctl->ops.hw_fence_ctrl(hw_ctl, true, true, 1);

	qtime = arch_timer_read_counter();
	SDE_EVT32(ctl_id, ipcc_signal_id, ipcc_client_id, SDE_EVTLOG_H32(qtime),
		SDE_EVTLOG_L32(qtime));

	return 0;
}

void *sde_sync_get(uint64_t fd)
{
	/* force signed compare, fdget accepts an int argument */
	return (signed int)fd >= 0 ? sync_file_get_fence(fd) : NULL;
}

void sde_sync_put(void *fence)
{
	if (fence)
		dma_fence_put(fence);
}
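
/*
 * Usage sketch (illustrative; 'acquire_fd' is a hypothetical variable): a
 * caller that receives a sync-file fd from userspace can resolve and release
 * the underlying dma-fence with this pair:
 *
 *	struct dma_fence *in_fence = sde_sync_get(acquire_fd);
 *
 *	if (in_fence) {
 *		// ... wait on or register the fence ...
 *		sde_sync_put(in_fence);
 *	}
 *
 * sde_sync_get() returns NULL for a negative fd and otherwise takes a
 * reference via sync_file_get_fence(); sde_sync_put() drops that reference.
 */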

void sde_fence_dump(struct dma_fence *fence)
{
	char timeline_str[TIMELINE_VAL_LENGTH] = {0};

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence, timeline_str, TIMELINE_VAL_LENGTH);

	SDE_ERROR(
		"fence drv name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x status:%d flags:0x%x\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff,
		dma_fence_get_status(fence), fence->flags);
}

static void sde_fence_dump_user_fds_info(struct dma_fence *base_fence)
{
	struct dma_fence_array *array;
	struct dma_fence *user_fence;
	int i;

	array = container_of(base_fence, struct dma_fence_array, base);
	if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &base_fence->flags) &&
			test_bit(SPEC_FENCE_FLAG_ARRAY_BIND, &base_fence->flags)) {
		for (i = 0; i < array->num_fences; i++) {
			user_fence = array->fences[i];
			if (user_fence) {
				dma_fence_get(user_fence);
				sde_fence_dump(user_fence);
				dma_fence_put(user_fence);
			}
		}
	}
}

signed long sde_sync_wait(void *fnc, long timeout_ms)
{
	struct dma_fence *fence = fnc;
	int rc, status = 0;

	if (!fence)
		return -EINVAL;
	else if (dma_fence_is_signaled(fence))
		return timeout_ms ? msecs_to_jiffies(timeout_ms) : 1;

	rc = dma_fence_wait_timeout(fence, true,
		msecs_to_jiffies(timeout_ms));
	if (!rc || (rc == -EINVAL) || fence->error) {
		status = dma_fence_get_status(fence);
		if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
			if (status == -EINVAL) {
				SDE_INFO("spec fence bind failure status:%d\n", status);
				rc = -EBADF;
			} else if (fence->ops->signaled && fence->ops->signaled(fence)) {
				SDE_INFO("spec fence status:%d\n", status);
			} else {
				sde_fence_dump(fence);
				sde_fence_dump_user_fds_info(fence);
			}
		} else {
			sde_fence_dump(fence);
		}
	}

	return rc;
}
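
/*
 * Return-value note (derived from the code above): sde_sync_wait() follows
 * the dma_fence_wait_timeout() convention - zero means the wait timed out,
 * a negative value is an error (with -EBADF substituted for a speculative
 * fence bind failure), and a positive value is the remaining jiffies. An
 * illustrative caller:
 *
 *	long ret = sde_sync_wait(fence, 100);
 *
 *	if (ret <= 0)
 *		pr_err("fence wait failed: %ld\n", ret);
 */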

uint32_t sde_sync_get_name_prefix(void *fence)
{
	const char *name;
	uint32_t i, prefix;
	struct dma_fence *f = fence;

	if (!fence)
		return 0;

	name = f->ops->get_driver_name(f);
	if (!name)
		return 0;

	prefix = 0x0;
	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
		prefix = (prefix << CHAR_BIT) | name[i];

	return prefix;
}
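
/*
 * Worked example: the prefix packs up to the first four driver-name bytes,
 * most significant first. For an example driver name of "sde"
 * ('s' = 0x73, 'd' = 0x64, 'e' = 0x65), the loop yields 0x73, then 0x7364,
 * then 0x736465, so the returned prefix is 0x00736465.
 */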

static void sde_fence_destroy(struct kref *kref)
{
	struct sde_fence_context *ctx;

	if (!kref) {
		SDE_ERROR("received invalid kref\n");
		return;
	}

	ctx = container_of(kref, struct sde_fence_context, kref);
	kfree(ctx);
}

static inline struct sde_fence *to_sde_fence(struct dma_fence *fence)
{
	return container_of(fence, struct sde_fence, base);
}

static const char *sde_fence_get_driver_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->name;
}

static const char *sde_fence_get_timeline_name(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);

	return f->ctx->name;
}

static bool sde_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static bool sde_fence_signaled(struct dma_fence *fence)
{
	struct sde_fence *f = to_sde_fence(fence);
	bool status;

	status = ((int)(fence->seqno - f->ctx->done_count) <= 0);
	SDE_DEBUG("status:%d fence seq:%llu and timeline:%u\n",
		status, fence->seqno, f->ctx->done_count);

	return status;
}

static void sde_fence_release(struct dma_fence *fence)
{
	struct sde_fence *f;

	if (fence) {
		f = to_sde_fence(fence);

		/* Delete the HW fence */
		if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
			_sde_hw_fence_release(f);

		kref_put(&f->ctx->kref, sde_fence_destroy);
		kfree(f);
	}
}

static void sde_fence_value_str(struct dma_fence *fence, char *str, int size)
{
	if (!fence || !str)
		return;

	snprintf(str, size, "%llu", fence->seqno);
}

static void sde_fence_timeline_value_str(struct dma_fence *fence, char *str,
		int size)
{
	struct sde_fence *f = to_sde_fence(fence);

	if (!fence || !f->ctx || !str)
		return;

	snprintf(str, size, "%u", f->ctx->done_count);
}

static struct dma_fence_ops sde_fence_ops = {
	.get_driver_name = sde_fence_get_driver_name,
	.get_timeline_name = sde_fence_get_timeline_name,
	.enable_signaling = sde_fence_enable_signaling,
	.signaled = sde_fence_signaled,
	.wait = dma_fence_default_wait,
	.release = sde_fence_release,
	.fence_value_str = sde_fence_value_str,
	.timeline_value_str = sde_fence_timeline_value_str,
};

/**
 * _sde_fence_create_fd - create fence object and return an fd for it
 * This function is NOT thread-safe.
 * @fence_ctx: fence context to associate the fence with
 * @val: timeline value at which to signal the fence
 * @hw_ctl: hw ctl to use when also creating a hw-fence, or NULL
 * Return: File descriptor on success, or negative error code on error
 */
static int _sde_fence_create_fd(void *fence_ctx, uint32_t val, struct sde_hw_ctl *hw_ctl)
{
	struct sde_fence *sde_fence;
	struct sync_file *sync_file;
	signed int fd = -EINVAL;
	struct sde_fence_context *ctx = fence_ctx;

	if (!ctx) {
		SDE_ERROR("invalid context\n");
		goto exit;
	}

	sde_fence = kzalloc(sizeof(*sde_fence), GFP_KERNEL);
	if (!sde_fence)
		return -ENOMEM;

	sde_fence->ctx = fence_ctx;
	snprintf(sde_fence->name, SDE_FENCE_NAME_SIZE, "sde_fence:%s:%u",
		sde_fence->ctx->name, val);
	dma_fence_init(&sde_fence->base, &sde_fence_ops, &ctx->lock,
		ctx->context, val);
	kref_get(&ctx->kref);

	/* create fd */
	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		SDE_ERROR("failed to get_unused_fd_flags(), %s\n",
			sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* create fence */
	sync_file = sync_file_create(&sde_fence->base);
	if (sync_file == NULL) {
		put_unused_fd(fd);
		fd = -EINVAL;
		SDE_ERROR("couldn't create fence, %s\n", sde_fence->name);
		dma_fence_put(&sde_fence->base);
		goto exit;
	}

	/* if a hw ctl was provided, try to create a hw-fence as well */
	if (hw_ctl)
		sde_fence_create_hw_fence(hw_ctl, sde_fence);

	fd_install(fd, sync_file->file);
	sde_fence->fd = fd;

	spin_lock(&ctx->list_lock);
	list_add_tail(&sde_fence->fence_list, &ctx->fence_list_head);
	spin_unlock(&ctx->list_lock);

exit:
	return fd;
}

struct sde_fence_context *sde_fence_init(const char *name, uint32_t drm_id)
{
	struct sde_fence_context *ctx;

	if (!name) {
		SDE_ERROR("invalid argument(s)\n");
		return ERR_PTR(-EINVAL);
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		SDE_ERROR("failed to alloc fence ctx\n");
		return ERR_PTR(-ENOMEM);
	}

	strlcpy(ctx->name, name, ARRAY_SIZE(ctx->name));
	ctx->drm_id = drm_id;
	kref_init(&ctx->kref);
	ctx->context = dma_fence_context_alloc(1);

	spin_lock_init(&ctx->lock);
	spin_lock_init(&ctx->list_lock);
	INIT_LIST_HEAD(&ctx->fence_list_head);

	return ctx;
}
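
/*
 * Lifecycle sketch (illustrative; "crtc-0" and 'drm_id' are assumed example
 * values): a CRTC or connector would typically create one timeline context
 * and release it with the matching deinit call:
 *
 *	struct sde_fence_context *ctx = sde_fence_init("crtc-0", drm_id);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	// ... create and signal fences against ctx ...
 *	sde_fence_deinit(ctx);
 *
 * The context is reference-counted; each fence created against it holds a
 * kref until that fence is released.
 */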

void sde_fence_deinit(struct sde_fence_context *ctx)
{
	if (!ctx) {
		SDE_ERROR("invalid fence\n");
		return;
	}

	kref_put(&ctx->kref, sde_fence_destroy);
}

void sde_fence_prepare(struct sde_fence_context *ctx)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid argument(s), fence %pK\n", ctx);
	} else {
		spin_lock_irqsave(&ctx->lock, flags);
		++ctx->commit_count;
		spin_unlock_irqrestore(&ctx->lock, flags);
	}
}

static void _sde_fence_trigger(struct sde_fence_context *ctx, bool error, ktime_t ts)
{
	unsigned long flags;
	struct sde_fence *fc, *next;
	bool is_signaled = false;

	kref_get(&ctx->kref);

	spin_lock(&ctx->list_lock);
	if (list_empty(&ctx->fence_list_head)) {
		SDE_DEBUG("nothing to trigger!\n");
		goto end;
	}

	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		spin_lock_irqsave(&ctx->lock, flags);
		if (error)
			dma_fence_set_error(&fc->base, -EBUSY);
		is_signaled = sde_fence_signaled(&fc->base);
		if (is_signaled)
			dma_fence_signal_timestamp_locked(&fc->base, ts);
		spin_unlock_irqrestore(&ctx->lock, flags);

		if (is_signaled) {
			list_del_init(&fc->fence_list);
			dma_fence_put(&fc->base);
		}
	}
end:
	spin_unlock(&ctx->list_lock);
	kref_put(&ctx->kref, sde_fence_destroy);
}

int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
		uint32_t offset, struct sde_hw_ctl *hw_ctl)
{
	uint32_t trigger_value;
	int fd, rc = -EINVAL;
	unsigned long flags;

	if (!ctx || !val) {
		SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
			ctx != NULL, val != NULL);
		return rc;
	}

	/*
	 * Allow created fences to have a constant offset with respect
	 * to the timeline. This allows us to delay the fence signalling
	 * w.r.t. the commit completion (e.g., an offset of +1 would
	 * cause fences returned during a particular commit to signal
	 * after an additional delay of one commit, rather than at the
	 * end of the current one).
	 */
	spin_lock_irqsave(&ctx->lock, flags);
	trigger_value = ctx->commit_count + offset;
	spin_unlock_irqrestore(&ctx->lock, flags);

	fd = _sde_fence_create_fd(ctx, trigger_value, hw_ctl);
	*val = fd;

	SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
		fd, trigger_value, ctx->commit_count, offset);
	SDE_EVT32(ctx->drm_id, trigger_value, fd, hw_ctl ? hw_ctl->idx : 0);
	rc = (fd >= 0) ? 0 : fd;

	return rc;
}
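
/*
 * Timeline example (values assumed for illustration): if commit_count is 5
 * when sde_fence_create() is called with offset 1, the new fence's seqno is
 * 6. sde_fence_signaled() treats the fence as signaled once
 * (int)(seqno - done_count) <= 0, i.e. once done_count has advanced to 6
 * through subsequent sde_fence_signal() calls.
 */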

void sde_fence_signal(struct sde_fence_context *ctx, ktime_t ts,
		enum sde_fence_event fence_event, struct sde_hw_ctl *hw_ctl)
{
	unsigned long flags;

	if (!ctx) {
		SDE_ERROR("invalid ctx, %pK\n", ctx);
		return;
	}

	spin_lock_irqsave(&ctx->lock, flags);
	if (fence_event == SDE_FENCE_RESET_TIMELINE) {
		/* reset hw-fences without error */
		if (hw_ctl)
			_reset_hw_fence_timeline(hw_ctl, MSM_HW_FENCE_RESET_WITHOUT_ERROR |
				MSM_HW_FENCE_RESET_WITHOUT_DESTROY);

		if ((int)(ctx->done_count - ctx->commit_count) < 0) {
			SDE_DEBUG(
				"timeline reset attempt! ctx:0x%x done count:%d commit:%d\n",
				ctx->drm_id, ctx->done_count, ctx->commit_count);
			ctx->done_count = ctx->commit_count;
			SDE_EVT32(ctx->drm_id, ctx->done_count,
				ctx->commit_count, ktime_to_us(ts),
				fence_event, SDE_EVTLOG_FUNC_CASE1);
		} else {
			spin_unlock_irqrestore(&ctx->lock, flags);
			return;
		}
	} else if ((int)(ctx->done_count - ctx->commit_count) < 0) {
		++ctx->done_count;
		SDE_DEBUG("fence_signal:done count:%d commit count:%d\n",
			ctx->done_count, ctx->commit_count);
	} else {
		SDE_ERROR("extra signal attempt! done count:%d commit:%d\n",
			ctx->done_count, ctx->commit_count);
		SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
			ktime_to_us(ts), fence_event, SDE_EVTLOG_FATAL);
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ctx->lock, flags);

	SDE_EVT32(ctx->drm_id, ctx->done_count, ctx->commit_count,
		ktime_to_us(ts));

	_sde_fence_trigger(ctx, (fence_event == SDE_FENCE_SIGNAL_ERROR), ts);
}
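
/*
 * Signal-flow summary (derived from the code above): sde_fence_prepare()
 * advances commit_count at the start of a commit, sde_fence_signal() advances
 * done_count (or snaps it forward to commit_count on SDE_FENCE_RESET_TIMELINE),
 * and _sde_fence_trigger() then walks the fence list, signaling and dropping
 * every fence whose seqno is now covered by done_count. Fences signaled with
 * SDE_FENCE_SIGNAL_ERROR carry a -EBUSY fence error.
 */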

void sde_fence_timeline_status(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj)
{
	char *obj_name;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	SDE_ERROR("drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);
}

void sde_fence_list_dump(struct dma_fence *fence, struct seq_file **s)
{
	char timeline_str[TIMELINE_VAL_LENGTH] = {0};

	if (fence->ops->timeline_value_str)
		fence->ops->timeline_value_str(fence,
			timeline_str, TIMELINE_VAL_LENGTH);

	seq_printf(*s, "fence name:%s timeline name:%s seqno:0x%llx timeline:%s signaled:0x%x\n",
		fence->ops->get_driver_name(fence),
		fence->ops->get_timeline_name(fence),
		fence->seqno, timeline_str,
		fence->ops->signaled ?
			fence->ops->signaled(fence) : 0xffffffff);
}

void sde_debugfs_timeline_dump(struct sde_fence_context *ctx,
		struct drm_mode_object *drm_obj, struct seq_file **s)
{
	char *obj_name;
	struct sde_fence *fc, *next;
	struct dma_fence *fence;

	if (!ctx || !drm_obj) {
		SDE_ERROR("invalid input params\n");
		return;
	}

	switch (drm_obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		obj_name = "crtc";
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		obj_name = "connector";
		break;
	default:
		obj_name = "unknown";
		break;
	}

	seq_printf(*s, "drm obj:%s id:%d type:0x%x done_count:%d commit_count:%d\n",
		obj_name, drm_obj->id, drm_obj->type, ctx->done_count,
		ctx->commit_count);

	spin_lock(&ctx->list_lock);
	list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
		fence = &fc->base;
		sde_fence_list_dump(fence, s);
	}
	spin_unlock(&ctx->list_lock);
}