hw_fence_drv_priv.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #include <linux/uaccess.h>
  6. #include <linux/of_platform.h>
  7. #include <linux/of_address.h>
  8. #include "hw_fence_drv_priv.h"
  9. #include "hw_fence_drv_utils.h"
  10. #include "hw_fence_drv_ipc.h"
  11. #include "hw_fence_drv_debug.h"
  12. /* Global atomic lock */
  13. #define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
  14. #define IS_HW_FENCE_TX_QUEUE(queue_type) ((queue_type) == HW_FENCE_TX_QUEUE - 1)
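/*
 * hw_fence_get_qtime() below returns the raw counter used to timestamp fence
 * events: the always-on sleep-timer MMIO register when HWFENCE_USE_SLEEP_TIMER
 * is defined, or the arch qtimer counter otherwise.
 */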
  15. inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
  16. {
  17. #ifdef HWFENCE_USE_SLEEP_TIMER
  18. return readl_relaxed(drv_data->qtime_io_mem);
  19. #else /* USE QTIMER */
  20. return arch_timer_read_counter();
  21. #endif /* HWFENCE_USE_SLEEP_TIMER */
  22. }
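/*
 * init_hw_fences_queues() reserves the HFI memory region indicated by
 * 'mem_reserve_id' (ctrl queue or a client's queues), fills in the HFI table
 * header plus one queue header per queue, and records each queue's VA/PA,
 * size and TX-queue index start/factor in the caller-provided 'queues' array.
 */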
  23. static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
  24. enum hw_fence_mem_reserve mem_reserve_id,
  25. struct msm_hw_fence_mem_addr *mem_descriptor,
  26. struct msm_hw_fence_queue *queues, int queues_num,
  27. int client_id)
  28. {
  29. struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
  30. struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
  31. struct hw_fence_client_type_desc *desc;
  32. void *ptr, *qptr;
  33. phys_addr_t phys, qphys;
  34. u32 size, start_queue_offset, txq_idx_start = 0, txq_idx_factor = 1;
  35. int headers_size, queue_size, payload_size;
  36. int start_padding = 0, end_padding = 0;
  37. int i, ret = 0;
  38. bool skip_txq_wr_idx = false;
  39. HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
  40. switch (mem_reserve_id) {
  41. case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
  42. headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
  43. queue_size = drv_data->hw_fence_ctrl_queue_size;
  44. payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
  45. break;
  46. case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
  47. if (client_id >= drv_data->clients_num ||
  48. !drv_data->hw_fence_client_queue_size[client_id].type) {
  49. HWFNC_ERR("Invalid client_id:%d for clients_num:%lu\n", client_id,
  50. drv_data->clients_num);
  51. return -EINVAL;
  52. }
  53. desc = drv_data->hw_fence_client_queue_size[client_id].type;
  54. start_padding = desc->start_padding;
  55. end_padding = desc->end_padding;
  56. headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num) + start_padding +
  57. end_padding;
  58. queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD * desc->queue_entries;
  59. payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
  60. txq_idx_start = desc->txq_idx_start;
  61. txq_idx_factor = desc->txq_idx_factor ? desc->txq_idx_factor : 1;
  62. skip_txq_wr_idx = desc->skip_txq_wr_idx;
  63. break;
  64. default:
  65. HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
  66. return -EINVAL;
  67. }
  68. /* Reserve Virtual and Physical memory for HFI headers */
  69. ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
  70. if (ret) {
  71. HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
  72. return -ENOMEM;
  73. }
  74. HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
  75. /* Populate Memory descriptor with address */
  76. mem_descriptor->virtual_addr = ptr;
  77. mem_descriptor->device_addr = phys;
  78. mem_descriptor->size = size; /* bytes */
  79. mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */
  80. HWFNC_DBG_INIT("Initialize headers: headers_size:%d start_padding:%d end_padding:%d\n",
  81. headers_size, start_padding, end_padding);
  82. /* Initialize headers info within hfi memory */
  83. hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
  84. hfi_table_header->version = 0;
  85. hfi_table_header->size = size; /* bytes */
  86. /* Offset, from the Base Address, where the first queue header starts */
  87. hfi_table_header->qhdr0_offset = HW_FENCE_HFI_TABLE_HEADER_SIZE + start_padding;
  88. hfi_table_header->qhdr_size = HW_FENCE_HFI_QUEUE_HEADER_SIZE;
  89. hfi_table_header->num_q = queues_num; /* number of queues */
  90. hfi_table_header->num_active_q = queues_num;
  91. /* Initialize Queues Info within HFI memory */
  92. /*
  93. * Calculate the offset where the hfi queue header starts, which is at the
  94. * end of the hfi table header
  95. */
  96. HWFNC_DBG_INIT("Initialize queues\n");
  97. hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
  98. ((char *)ptr + hfi_table_header->qhdr0_offset);
  99. for (i = 0; i < queues_num; i++) {
  100. HWFNC_DBG_INIT("init queue[%d]\n", i);
  101. /* Calculate the offset where the Queue starts */
  102. start_queue_offset = headers_size + (i * queue_size); /* Bytes */
  103. qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
  104. qptr = (char *)ptr + start_queue_offset; /* start of the va for queue elems */
  105. /* Set the physical start address in the HFI queue header */
  106. hfi_queue_header->start_addr = qphys;
  107. /* Set the queue type (i.e. RX or TX queue) */
  108. hfi_queue_header->type = IS_HW_FENCE_TX_QUEUE(i) ? HW_FENCE_TX_QUEUE :
  109. HW_FENCE_RX_QUEUE;
  110. /* Set the size of this header */
  111. hfi_queue_header->queue_size = queue_size;
  112. /* Set the payload size */
  113. hfi_queue_header->pkt_size = payload_size;
  114. /* Set write index for clients' tx queues that index from nonzero value */
  115. if (txq_idx_start && IS_HW_FENCE_TX_QUEUE(i) && !hfi_queue_header->write_index) {
  116. if (skip_txq_wr_idx)
  117. hfi_queue_header->tx_wm = txq_idx_start;
  118. hfi_queue_header->read_index = txq_idx_start;
  119. hfi_queue_header->write_index = txq_idx_start;
  120. HWFNC_DBG_INIT("init:TX_QUEUE client:%d rd_idx=%s=%lu\n", client_id,
  121. skip_txq_wr_idx ? "wr_idx=tx_wm" : "wr_idx",
  122. txq_idx_start);
  123. }
  124. /* Update memory for hfi_queue_header */
  125. wmb();
  126. /* Store Memory info in the Client data */
  127. queues[i].va_queue = qptr;
  128. queues[i].pa_queue = qphys;
  129. queues[i].va_header = hfi_queue_header;
  130. queues[i].q_size_bytes = queue_size;
  131. HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=0x%x hd:0x%pK sz:%u pkt:%d\n",
  132. hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
  133. client_id, i, queues[i].va_queue, queues[i].pa_queue, queues[i].va_header,
  134. queues[i].q_size_bytes, payload_size);
  135. /* Store additional tx queue rd_wr_idx properties */
  136. if (IS_HW_FENCE_TX_QUEUE(i)) {
  137. queues[i].rd_wr_idx_start = txq_idx_start;
  138. queues[i].rd_wr_idx_factor = txq_idx_factor;
  139. queues[i].skip_wr_idx = skip_txq_wr_idx;
  140. } else {
  141. queues[i].rd_wr_idx_factor = 1;
  142. }
  143. HWFNC_DBG_INIT("rd_wr_idx_start:%lu rd_wr_idx_factor:%lu skip_wr_idx:%s\n",
  144. queues[i].rd_wr_idx_start, queues[i].rd_wr_idx_factor,
  145. queues[i].skip_wr_idx ? "true" : "false");
  146. /* Next header */
  147. hfi_queue_header++;
  148. }
  149. return ret;
  150. }
  151. static inline bool _lock_client_queue(int queue_type)
  152. {
  153. /* Only lock Rx Queue */
  154. return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? true : false;
  155. }
  156. char *_get_queue_type(int queue_type)
  157. {
  158. return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ";
  159. }
  160. int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
  161. struct msm_hw_fence_queue_payload *payload, int queue_type)
  162. {
  163. struct msm_hw_fence_queue *queue;
  164. if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) {
  165. HWFNC_ERR("Invalid queue type:%s hw_fence_client:0x%pK payload:0x%pK\n", queue_type,
  166. hw_fence_client, payload);
  167. return -EINVAL;
  168. }
  169. queue = &hw_fence_client->queues[queue_type];
  170. HWFNC_DBG_Q("read client:%lu queue:0x%pK\n", hw_fence_client->client_id, queue);
  171. return hw_fence_read_queue_helper(queue, payload);
  172. }
  173. int hw_fence_read_queue_helper(struct msm_hw_fence_queue *queue,
  174. struct msm_hw_fence_queue_payload *payload)
  175. {
  176. struct msm_hw_fence_hfi_queue_header *hfi_header;
  177. u32 read_idx, write_idx, to_read_idx;
  178. u32 *read_ptr;
  179. u32 payload_size_u32, q_size_u32;
  180. struct msm_hw_fence_queue_payload *read_ptr_payload;
  181. hfi_header = queue->va_header;
  182. q_size_u32 = (queue->q_size_bytes / sizeof(u32));
  183. payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
  184. HWFNC_DBG_Q("sizeof payload:%d\n", sizeof(struct msm_hw_fence_queue_payload));
  185. if (!hfi_header || !payload) {
  186. HWFNC_ERR("Invalid queue\n");
  187. return -EINVAL;
  188. }
  189. /* Make sure data is ready before read */
  190. mb();
  191. /* Get read and write index */
  192. read_idx = readl_relaxed(&hfi_header->read_index);
  193. write_idx = readl_relaxed(&hfi_header->write_index);
  194. /* translate read and write indexes from custom indexing to dwords with no offset */
  195. if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
  196. read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
  197. write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
  198. HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n",
  199. read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
  200. }
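/*
 * Example of the translation above: with rd_wr_idx_start=S and
 * rd_wr_idx_factor=F, a hardware index X maps to the dword offset (X - S) * F;
 * the inverse mapping, (idx / F) + S, is applied before the updated
 * read_index is written back to the header further below.
 */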
  201. HWFNC_DBG_Q("read rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
  202. &hfi_header->read_index, &hfi_header->write_index, read_idx, write_idx, queue);
  203. if (read_idx == write_idx) {
  204. HWFNC_DBG_Q("Nothing to read!\n");
  205. return -EINVAL;
  206. }
  207. /* Move the pointer where we need to read and cast it */
  208. read_ptr = ((u32 *)queue->va_queue + read_idx);
  209. read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr;
  210. HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr,
  211. queue->va_queue, queue->pa_queue, read_ptr_payload);
  212. /* Calculate the index after the read */
  213. to_read_idx = read_idx + payload_size_u32;
  214. /*
  215. * wrap-around case, here we are reading the last element of the queue, therefore set
  216. * to_read_idx, which is the index after the read, to the beginning of the
  217. * queue
  218. */
  219. if (to_read_idx >= q_size_u32)
  220. to_read_idx = 0;
  221. /* translate to_read_idx to custom indexing with offset */
  222. if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
  223. to_read_idx = (to_read_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start;
  224. HWFNC_DBG_Q("translated to_read_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
  225. to_read_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
  226. }
  227. /* Read the Client Queue */
  228. *payload = *read_ptr_payload;
  229. /* update the read index */
  230. writel_relaxed(to_read_idx, &hfi_header->read_index);
  231. /* update memory for the index */
  232. wmb();
  233. /* Return one if queue still has contents after read */
  234. return to_read_idx == write_idx ? 0 : 1;
  235. }
  236. /*
  237. * This function writes to the queue of the client. The 'queue_type' determines
  238. * if this function is writing to the rx or tx queue
  239. */
  240. int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
  241. struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
  242. u64 flags, u64 client_data, u32 error, int queue_type)
  243. {
  244. struct msm_hw_fence_hfi_queue_header *hfi_header;
  245. struct msm_hw_fence_queue *queue;
  246. u32 read_idx;
  247. u32 write_idx;
  248. u32 to_write_idx;
  249. u32 q_size_u32;
  250. u32 q_free_u32;
  251. u32 *q_payload_write_ptr;
  252. u32 payload_size, payload_size_u32;
  253. struct msm_hw_fence_queue_payload *write_ptr_payload;
  254. bool lock_client = false;
  255. u32 lock_idx;
  256. u64 timestamp;
  257. u32 *wr_ptr;
  258. int ret = 0;
  259. if (queue_type >= hw_fence_client->queues_num) {
  260. HWFNC_ERR("Invalid queue type:%d client_id:%d q_num:%lu\n", queue_type,
  261. hw_fence_client->client_id, hw_fence_client->queues_num);
  262. return -EINVAL;
  263. }
  264. queue = &hw_fence_client->queues[queue_type];
  265. hfi_header = queue->va_header;
  266. q_size_u32 = (queue->q_size_bytes / sizeof(u32));
  267. payload_size = sizeof(struct msm_hw_fence_queue_payload);
  268. payload_size_u32 = (payload_size / sizeof(u32));
  269. if (!hfi_header) {
  270. HWFNC_ERR("Invalid queue\n");
  271. return -EINVAL;
  272. }
  273. /* if we skip updating wr_index, use hfi_header->tx_wm instead */
  274. if (queue->skip_wr_idx)
  275. wr_ptr = &hfi_header->tx_wm;
  276. else
  277. wr_ptr = &hfi_header->write_index;
  278. /*
  279. * We need to lock the client if there is an Rx Queue update, since that
  280. * is the only time when HW Fence driver can have a race condition updating
  281. * the Rx Queue, which also could be getting updated by the Fence CTL
  282. */
  283. lock_client = _lock_client_queue(queue_type);
  284. if (lock_client) {
  285. lock_idx = hw_fence_client->client_id - 1;
  286. if (lock_idx >= drv_data->client_lock_tbl_cnt) {
  287. HWFNC_ERR("lock for client id:%d exceed max:%d\n",
  288. hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
  289. return -EINVAL;
  290. }
  291. HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);
  292. /* lock the client rx queue to update */
  293. GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */
  294. }
  295. /* Make sure data is ready before read */
  296. mb();
  297. /* Get read and write index */
  298. read_idx = readl_relaxed(&hfi_header->read_index);
  299. write_idx = readl_relaxed(wr_ptr);
  300. HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
  301. hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
  302. read_idx, write_idx, queue, queue_type, queue->skip_wr_idx ? "true" : "false");
  303. /* translate read and write indexes from custom indexing to dwords with no offset */
  304. if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
  305. read_idx = (read_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
  306. write_idx = (write_idx - queue->rd_wr_idx_start) * queue->rd_wr_idx_factor;
  307. HWFNC_DBG_Q("rd_idx_u32:%lu wr_idx_u32:%lu rd_wr_idx start:%lu factor:%lu\n",
  308. read_idx, write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
  309. }
  310. /* Check queue to make sure message will fit */
  311. q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
  312. (read_idx - write_idx);
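/*
 * e.g. with q_size_u32=128, read_idx=8 and write_idx=120 this leaves
 * 128 - (120 - 8) = 16 free dwords; the write is rejected below unless the
 * payload fits with at least one dword to spare, so read_idx == write_idx
 * can only mean "queue empty".
 */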
  313. if (q_free_u32 <= payload_size_u32) {
  314. HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32);
  315. ret = -EINVAL;
  316. goto exit;
  317. }
  318. HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32);
  319. /* Move the pointer where we need to write and cast it */
  320. q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx);
  321. write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr;
  322. HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n",
  323. q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload);
  324. /* calculate the index after the write */
  325. to_write_idx = write_idx + payload_size_u32;
  326. HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx,
  327. payload_size_u32);
  328. HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n",
  329. hw_fence_client->client_id, _get_queue_type(queue_type),
  330. hash, ctxt_id, seqno, flags, error);
  331. /*
  332. * wrap-around case, here we are writing to the last element of the queue, therefore
  333. * set to_write_idx, which is the index after the write, to the beginning of the
  334. * queue
  335. */
  336. if (to_write_idx >= q_size_u32)
  337. to_write_idx = 0;
  338. /* translate to_write_idx to custom indexing with offset */
  339. if (queue->rd_wr_idx_start || queue->rd_wr_idx_factor != 1) {
  340. to_write_idx = (to_write_idx / queue->rd_wr_idx_factor) + queue->rd_wr_idx_start;
  341. HWFNC_DBG_Q("translated to_write_idx:%lu rd_wr_idx start:%lu factor:%lu\n",
  342. to_write_idx, queue->rd_wr_idx_start, queue->rd_wr_idx_factor);
  343. }
  344. /* Update Client Queue */
  345. writeq_relaxed(payload_size, &write_ptr_payload->size);
  346. writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
  347. writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
  348. writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
  349. writeq_relaxed(seqno, &write_ptr_payload->seqno);
  350. writeq_relaxed(hash, &write_ptr_payload->hash);
  351. writeq_relaxed(flags, &write_ptr_payload->flags);
  352. writeq_relaxed(client_data, &write_ptr_payload->client_data);
  353. writel_relaxed(error, &write_ptr_payload->error);
  354. timestamp = hw_fence_get_qtime(drv_data);
  355. writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
  356. writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);
  357. /* update memory for the message */
  358. wmb();
  359. /* update the write index */
  360. writel_relaxed(to_write_idx, wr_ptr);
  361. /* update memory for the index */
  362. wmb();
  363. exit:
  364. if (lock_client)
  365. GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */
  366. return ret;
  367. }
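/*
 * init_global_locks() carves out the locks region and exposes it as an array
 * of 64-bit lock words (client_lock_tbl), one slot per client; these are the
 * words toggled through GLOBAL_ATOMIC_STORE() (indexed by client_id - 1 in
 * hw_fence_update_queue()) to serialize Rx-queue updates with the Fence
 * Controller.
 */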
  368. static int init_global_locks(struct hw_fence_driver_data *drv_data)
  369. {
  370. struct msm_hw_fence_mem_addr *mem_descriptor;
  371. phys_addr_t phys;
  372. void *ptr;
  373. u32 size;
  374. int ret;
  375. ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr,
  376. &size, 0);
  377. if (ret) {
  378. HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret);
  379. return -ENOMEM;
  380. }
  381. HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
  382. /* Populate Memory descriptor with address */
  383. mem_descriptor = &drv_data->clients_locks_mem_desc;
  384. mem_descriptor->virtual_addr = ptr;
  385. mem_descriptor->device_addr = phys;
  386. mem_descriptor->size = size;
  387. mem_descriptor->mem_data = NULL; /* not storing special info for now */
  388. /* Initialize internal pointers for managing the tables */
  389. drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr;
  390. drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64);
  391. return 0;
  392. }
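/*
 * init_hw_fences_table() carves out the global fences table; the number of
 * entries is simply the carveout size divided by sizeof(struct msm_hw_fence).
 */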
  393. static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
  394. {
  395. struct msm_hw_fence_mem_addr *mem_descriptor;
  396. phys_addr_t phys;
  397. void *ptr;
  398. u32 size;
  399. int ret;
  400. ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr,
  401. &size, 0);
  402. if (ret) {
  403. HWFNC_ERR("Failed to reserve table mem %d\n", ret);
  404. return -ENOMEM;
  405. }
  406. HWFNC_DBG_INIT("phys:0x%x ptr:0x%pK size:%d\n", phys, ptr, size);
  407. /* Populate Memory descriptor with address */
  408. mem_descriptor = &drv_data->hw_fences_mem_desc;
  409. mem_descriptor->virtual_addr = ptr;
  410. mem_descriptor->device_addr = phys;
  411. mem_descriptor->size = size;
  412. mem_descriptor->mem_data = NULL; /* not storing special info for now */
  413. /* Initialize internal pointers for managing the tables */
  414. drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr;
  415. drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size /
  416. sizeof(struct msm_hw_fence);
  417. HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl,
  418. drv_data->hw_fences_tbl_cnt);
  419. return 0;
  420. }
  421. static int init_hw_fences_events(struct hw_fence_driver_data *drv_data)
  422. {
  423. phys_addr_t phys;
  424. void *ptr;
  425. u32 size;
  426. int ret;
  427. ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_EVENTS_BUFF, &phys, &ptr,
  428. &size, 0);
  429. if (ret) {
  430. HWFNC_DBG_INFO("Failed to reserve events buffer %d\n", ret);
  431. return -ENOMEM;
  432. }
  433. drv_data->events = (struct msm_hw_fence_event *)ptr;
  434. drv_data->total_events = size / sizeof(struct msm_hw_fence_event);
  435. HWFNC_DBG_INIT("events:0x%pK total_events:%u event_sz:%u total_size:%u\n", drv_data->events,
  436. drv_data->total_events, sizeof(struct msm_hw_fence_event), size);
  437. return 0;
  438. }
  439. static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
  440. {
  441. struct msm_hw_fence_mem_addr *mem_descriptor;
  442. int ret;
  443. mem_descriptor = &drv_data->ctrl_queue_mem_desc;
  444. /* Init ctrl queue */
  445. ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
  446. mem_descriptor, drv_data->ctrl_queues,
  447. HW_FENCE_CTRL_QUEUES, 0);
  448. if (ret)
  449. HWFNC_ERR("Failure to init ctrl queue\n");
  450. return ret;
  451. }
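/*
 * hw_fence_init() brings up the driver-wide state in order: parse DT
 * properties, allocate and share the carveout, init the ctrl queue, the
 * global client locks, the fences table and the event log, then map the IPCC
 * and qtime registers, register debugfs and finally hook up the vIRQ from
 * the VM.
 */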
  452. int hw_fence_init(struct hw_fence_driver_data *drv_data)
  453. {
  454. int ret;
  455. __le32 *mem;
  456. ret = hw_fence_utils_parse_dt_props(drv_data);
  457. if (ret) {
  458. HWFNC_ERR("failed to set dt properties\n");
  459. goto exit;
  460. }
  461. /* Allocate hw fence driver mem pool and share it with HYP */
  462. ret = hw_fence_utils_alloc_mem(drv_data);
  463. if (ret) {
  464. HWFNC_ERR("failed to alloc base memory\n");
  465. goto exit;
  466. }
  467. /* Initialize ctrl queue */
  468. ret = init_ctrl_queue(drv_data);
  469. if (ret)
  470. goto exit;
  471. ret = init_global_locks(drv_data);
  472. if (ret)
  473. goto exit;
  474. HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl,
  475. drv_data->client_lock_tbl_cnt);
  476. /* Initialize hw fences table */
  477. ret = init_hw_fences_table(drv_data);
  478. if (ret)
  479. goto exit;
  480. /* Initialize event log */
  481. ret = init_hw_fences_events(drv_data);
  482. if (ret)
  483. HWFNC_DBG_INFO("Unable to init events\n");
  484. /* Map ipcc registers */
  485. ret = hw_fence_utils_map_ipcc(drv_data);
  486. if (ret) {
  487. HWFNC_ERR("ipcc regs mapping failed\n");
  488. goto exit;
  489. }
  490. /* Map time register */
  491. ret = hw_fence_utils_map_qtime(drv_data);
  492. if (ret) {
  493. HWFNC_ERR("qtime reg mapping failed\n");
  494. goto exit;
  495. }
  496. /* Init debugfs */
  497. ret = hw_fence_debug_debugfs_register(drv_data);
  498. if (ret) {
  499. HWFNC_ERR("debugfs init failed\n");
  500. goto exit;
  501. }
  502. /* Init vIRQ from VM */
  503. ret = hw_fence_utils_init_virq(drv_data);
  504. if (ret) {
  505. HWFNC_ERR("failed to init virq\n");
  506. goto exit;
  507. }
  508. mem = drv_data->io_mem_base;
  509. HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem);
  510. HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n",
  511. drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt);
  512. exit:
  513. return ret;
  514. }
  515. int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
  516. struct msm_hw_fence_client *hw_fence_client,
  517. struct msm_hw_fence_mem_addr *mem_descriptor)
  518. {
  519. int ret;
  520. if (!drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type) {
  521. HWFNC_ERR("invalid client_id:%d not reserved client queue; check dt props\n",
  522. hw_fence_client->client_id);
  523. return -EINVAL;
  524. }
  525. /* Init client queues */
  526. ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
  527. &hw_fence_client->mem_descriptor, hw_fence_client->queues,
  528. drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].type->queues_num,
  529. hw_fence_client->client_id);
  530. if (ret) {
  531. HWFNC_ERR("Failure to init the queue for client:%d\n",
  532. hw_fence_client->client_id);
  533. goto exit;
  534. }
  535. /* Init client memory descriptor */
  536. memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
  537. sizeof(struct msm_hw_fence_mem_addr));
  538. exit:
  539. return ret;
  540. }
  541. int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
  542. struct msm_hw_fence_client *hw_fence_client)
  543. {
  544. int ret = 0;
  545. /*
  546. * Initialize IPCC Signals for this client
  547. *
  548. * NOTE: For each Client HW-Core, the client driver might do its own
  549. * initialization (in case any hw-sequence must be enforced); however,
  550. * if that is not the case, any per-client ipcc init needed to enable
  551. * signaling can go here.
  552. */
  553. switch ((int)hw_fence_client->client_id_ext) {
  554. case HW_FENCE_CLIENT_ID_CTX0:
  555. /* nothing to initialize for gpu client */
  556. break;
  557. #if IS_ENABLED(CONFIG_DEBUG_FS)
  558. case HW_FENCE_CLIENT_ID_VAL0:
  559. case HW_FENCE_CLIENT_ID_VAL1:
  560. case HW_FENCE_CLIENT_ID_VAL2:
  561. case HW_FENCE_CLIENT_ID_VAL3:
  562. case HW_FENCE_CLIENT_ID_VAL4:
  563. case HW_FENCE_CLIENT_ID_VAL5:
  564. case HW_FENCE_CLIENT_ID_VAL6:
  565. /* nothing to initialize for validation clients */
  566. break;
  567. #endif /* CONFIG_DEBUG_FS */
  568. case HW_FENCE_CLIENT_ID_CTL0:
  569. case HW_FENCE_CLIENT_ID_CTL1:
  570. case HW_FENCE_CLIENT_ID_CTL2:
  571. case HW_FENCE_CLIENT_ID_CTL3:
  572. case HW_FENCE_CLIENT_ID_CTL4:
  573. case HW_FENCE_CLIENT_ID_CTL5:
  574. /* initialize ipcc signals for dpu clients */
  575. HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n",
  576. hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized);
  577. if (!drv_data->ipcc_dpu_initialized) {
  578. drv_data->ipcc_dpu_initialized = true;
  579. /* Init dpu client ipcc signal */
  580. hw_fence_ipcc_enable_dpu_signaling(drv_data);
  581. }
  582. break;
  583. case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE +
  584. MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
  585. /* nothing to initialize for IPE client */
  586. break;
  587. case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU +
  588. MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
  589. /* nothing to initialize for VPU client */
  590. break;
  591. case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 +
  592. MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
  593. /* nothing to initialize for IFE clients */
  594. break;
  595. default:
  596. HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext);
  597. ret = -EINVAL;
  598. break;
  599. }
  600. return ret;
  601. }
  602. int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
  603. {
  604. /*
  605. * Initialize Fence Controller resources for this Client,
  606. * here we need to use the CTRL queue to communicate to the Fence
  607. * Controller the shared memory for the Rx/Tx queue for this client
  608. * as well as any information that Fence Controller might need to
  609. * know for this client.
  610. *
  611. * NOTE: For now, we are doing a static allocation of the
  612. * client's queues, so currently we don't need any notification
  613. * to the Fence CTL here through the CTRL queue.
  614. * Later-on we might need it, once the PVM to SVM (and vice versa)
  615. * communication for initialization is supported.
  616. */
  617. return 0;
  618. }
  619. void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
  620. struct msm_hw_fence_client *hw_fence_client)
  621. {
  622. /*
  623. * Deallocate any resource allocated for this client.
  624. * If fence controller was notified about existence of this client,
  625. * we will need to notify fence controller that this client is gone
  626. *
  627. * NOTE: Since the clients' queues currently use 'fixed' memory, we don't
  628. * need to notify the Fence Controller yet;
  629. * however, if the memory allocation ever moves from 'fixed' to a dynamic
  630. * allocation, then we will need to notify the FenceCTL here about the
  631. * client that is going away.
  632. */
  633. mutex_lock(&drv_data->clients_register_lock);
  634. drv_data->clients[hw_fence_client->client_id] = NULL;
  635. mutex_unlock(&drv_data->clients_register_lock);
  636. /* Deallocate client's object */
  637. HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
  638. kfree(hw_fence_client);
  639. }
  640. static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
  641. u64 step, u64 *hash)
  642. {
  643. u64 m_size = table_total_entries;
  644. int val = 0;
  645. if (step == 0) {
  646. u64 a_multiplier = HW_FENCE_HASH_A_MULT;
  647. u64 c_multiplier = HW_FENCE_HASH_C_MULT;
  648. u64 b_multiplier = context + (context - 1); /* odd multiplier */
  649. /*
  650. * if m is a power of 2, we could optimize with a right shift;
  651. * for now we don't do it, to avoid assuming a power of two
  652. */
  653. *hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
  654. } else {
  655. if (step >= m_size) {
  656. /*
  657. * If we already traversed the whole table, return failure: this means there
  658. * are no available spots, i.e. the table is either full or full enough
  659. * that we couldn't find a free spot after traversing the whole table.
  660. * Ideally table shouldn't be so full that we cannot find a value after some
  661. * iterations, so this maximum step size could be optimized to fail earlier.
  662. */
  663. HWFNC_ERR("Fence Table tranversed and no available space!\n");
  664. val = -EINVAL;
  665. } else {
  666. /*
  667. * Linearly increment the hash value to find the next element in the table;
  668. * note that this relies on the 'scrambled' data from the original hash.
  669. * Also, apply a mod division to wrap around in case we reached the
  670. * end of the table
  671. */
  672. *hash = (*hash + 1) % m_size;
  673. }
  674. }
  675. return val;
  676. }
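/*
 * The lookup path calls _calculate_hash() with step 0 to pick the initial
 * slot from (context, seqno), then re-calls it with step 1, 2, ... to probe
 * the following slots (wrapping modulo the table size) until a suitable entry
 * is found or the whole table has been visited.
 */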
  677. static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
  678. struct msm_hw_fence *hw_fences_tbl,
  679. u64 hash)
  680. {
  681. if (hash >= table_total_entries) {
  682. HWFNC_ERR("hash:%llu out of max range:%llu\n",
  683. hash, table_total_entries);
  684. return NULL;
  685. }
  686. return &hw_fences_tbl[hash];
  687. }
  688. static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
  689. {
  690. /* If valid is set, the hw fence is not free */
  691. return hw_fence->valid ? false : true;
  692. }
  693. static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
  694. {
  695. return ((hw_fence->ctx_id == context && hw_fence->seq_id == seqno) ? true : false);
  696. }
  697. /* clears everything but the 'valid' field */
  698. static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence)
  699. {
  700. int i;
  701. hw_fence->error = 0;
  702. wmb(); /* update memory to avoid mem-abort */
  703. hw_fence->ctx_id = 0;
  704. hw_fence->seq_id = 0;
  705. hw_fence->wait_client_mask = 0;
  706. hw_fence->fence_allocator = 0;
  707. hw_fence->fence_signal_client = 0;
  708. hw_fence->flags = 0;
  709. hw_fence->fence_create_time = 0;
  710. hw_fence->fence_trigger_time = 0;
  711. hw_fence->fence_wait_time = 0;
  712. hw_fence->debug_refcount = 0;
  713. hw_fence->parents_cnt = 0;
  714. hw_fence->pending_child_cnt = 0;
  715. for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++)
  716. hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE;
  717. memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data));
  718. }
  719. /* This function must be called with the hw fence lock */
  720. static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data,
  721. struct msm_hw_fence *hw_fence, u32 client_id,
  722. u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
  723. {
  724. _cleanup_hw_fence(hw_fence);
  725. /* reserve this HW fence */
  726. hw_fence->valid = 1;
  727. hw_fence->ctx_id = context;
  728. hw_fence->seq_id = seqno;
  729. hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */
  730. hw_fence->fence_allocator = client_id;
  731. hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
  732. hw_fence->debug_refcount++;
  733. HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%llu\n",
  734. client_id, context, seqno, hash);
  735. }
  736. /* This function must be called with the hw fence lock */
  737. static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data,
  738. struct msm_hw_fence *hw_fence, u32 client_id,
  739. u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
  740. {
  741. _cleanup_hw_fence(hw_fence);
  742. /* unreserve this HW fence */
  743. hw_fence->valid = 0;
  744. HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%llu\n",
  745. client_id, context, seqno, hash);
  746. }
  747. /* This function must be called with the hw fence lock */
  748. static void _reserve_join_fence(struct hw_fence_driver_data *drv_data,
  749. struct msm_hw_fence *hw_fence, u32 client_id, u64 context,
  750. u64 seqno, u32 hash, u32 pending_child_cnt)
  751. {
  752. _cleanup_hw_fence(hw_fence);
  753. /* reserve this HW fence */
  754. hw_fence->valid = true;
  755. hw_fence->ctx_id = context;
  756. hw_fence->seq_id = seqno;
  757. hw_fence->fence_allocator = client_id;
  758. hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
  759. hw_fence->debug_refcount++;
  760. hw_fence->pending_child_cnt = pending_child_cnt;
  761. HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%llu\n",
  762. client_id, context, seqno, hash);
  763. }
  764. /* This function must be called with the hw fence lock */
  765. static void _fence_found(struct hw_fence_driver_data *drv_data,
  766. struct msm_hw_fence *hw_fence, u32 client_id,
  767. u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
  768. {
  769. /*
  770. * Do nothing, when this find fence fn is invoked, all processing is done outside.
  771. * Currently just keeping this function for debugging purposes, can be removed
  772. * in final versions
  773. */
  774. HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%llu\n",
  775. client_id, context, seqno, hash);
  776. }
  777. char *_get_op_mode(enum hw_fence_lookup_ops op_code)
  778. {
  779. switch (op_code) {
  780. case HW_FENCE_LOOKUP_OP_CREATE:
  781. return "CREATE";
  782. case HW_FENCE_LOOKUP_OP_DESTROY:
  783. return "DESTROY";
  784. case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
  785. return "CREATE_JOIN";
  786. case HW_FENCE_LOOKUP_OP_FIND_FENCE:
  787. return "FIND_FENCE";
  788. default:
  789. return "UNKNOWN";
  790. }
  791. return "UNKNOWN";
  792. }
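/*
 * _hw_fence_lookup_and_process() walks the fences table starting at the
 * hashed slot. For each candidate it takes the per-fence lock and applies the
 * op-specific compare callback (free-slot check for CREATE/CREATE_JOIN,
 * ctx/seqno match for DESTROY/FIND); on a match it runs the corresponding
 * process callback under the lock and returns the entry and its hash,
 * otherwise it keeps probing and ultimately returns NULL.
 */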
  793. struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data,
  794. struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id,
  795. u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash)
  796. {
  797. bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno);
  798. void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence,
  799. u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending);
  800. struct msm_hw_fence *hw_fence = NULL;
  801. u64 step = 0;
  802. int ret = 0;
  803. bool hw_fence_found = false;
  804. if (!hash || !drv_data || !hw_fences_tbl) {
  805. HWFNC_ERR("Invalid input for hw_fence_lookup\n");
  806. return NULL;
  807. }
  808. *hash = ~0;
  809. HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code);
  810. switch (op_code) {
  811. case HW_FENCE_LOOKUP_OP_CREATE:
  812. compare_fnc = &_is_hw_fence_free;
  813. process_fnc = &_reserve_hw_fence;
  814. break;
  815. case HW_FENCE_LOOKUP_OP_DESTROY:
  816. compare_fnc = &_hw_fence_match;
  817. process_fnc = &_unreserve_hw_fence;
  818. break;
  819. case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
  820. compare_fnc = &_is_hw_fence_free;
  821. process_fnc = &_reserve_join_fence;
  822. break;
  823. case HW_FENCE_LOOKUP_OP_FIND_FENCE:
  824. compare_fnc = &_hw_fence_match;
  825. process_fnc = &_fence_found;
  826. break;
  827. default:
  828. HWFNC_ERR("Unknown op code:%d\n", op_code);
  829. return NULL;
  830. }
  831. while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) {
  832. /* Calculate the Hash for the Fence */
  833. ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash);
  834. if (ret) {
  835. HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n",
  836. context, seqno, *hash);
  837. break;
  838. }
  839. HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context,
  840. seqno);
  841. /* Get element from the table using the hash */
  842. hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash);
  843. HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n",
  844. hw_fences_tbl, hw_fence, *hash, hw_fence ? hw_fence->valid : 0xbad);
  845. if (!hw_fence) {
  846. HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n",
  847. context, seqno, *hash);
  848. break;
  849. }
  850. GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1);
  851. /* compare to either find a free fence or find an allocated fence */
  852. if (compare_fnc(hw_fence, context, seqno)) {
  853. /* Process the hw fence found by the algorithm */
  854. if (process_fnc) {
  855. process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash,
  856. pending_child_cnt);
  857. /* update memory table with processing */
  858. wmb();
  859. }
  860. HWFNC_DBG_L("client_id:%lu op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n",
  861. client_id, _get_op_mode(op_code), context, seqno, *hash, step);
  862. hw_fence_found = true;
  863. } else {
  864. if ((op_code == HW_FENCE_LOOKUP_OP_CREATE ||
  865. op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) &&
  866. seqno == hw_fence->seq_id && context == hw_fence->ctx_id) {
  867. /* ctx & seqno must be unique creating a hw-fence */
  868. HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n",
  869. context, seqno);
  870. GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
  871. break;
  872. }
  873. /* compare can fail if we have a collision, we will linearly resolve it */
  874. HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash,
  875. context, seqno);
  876. }
  877. GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
  878. /* Increment step for the next loop */
  879. step++;
  880. }
  881. /* If we iterated through the whole list and didn't find the fence, return null */
  882. if (!hw_fence_found) {
  883. HWFNC_ERR("fail to create hw-fence step:%llu\n", step);
  884. hw_fence = NULL;
  885. }
  886. HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n",
  887. op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1);
  888. return hw_fence;
  889. }
  890. int hw_fence_create(struct hw_fence_driver_data *drv_data,
  891. struct msm_hw_fence_client *hw_fence_client,
  892. u64 context, u64 seqno, u64 *hash)
  893. {
  894. u32 client_id = hw_fence_client->client_id;
  895. struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
  896. int ret = 0;
  897. /* allocate hw fence in table */
  898. if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
  899. context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) {
  900. HWFNC_ERR("Fail to create fence client:%lu ctx:%llu seqno:%llu\n",
  901. client_id, context, seqno);
  902. ret = -EINVAL;
  903. }
  904. return ret;
  905. }
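/*
 * Illustrative flow (a sketch, not enforced by this file): a producer client
 * typically allocates a table entry with hw_fence_create(), waiters attach to
 * it via hw_fence_register_wait_client(), and when the fence is signaled the
 * waiting clients are notified through their Rx queues and IPCC signals (see
 * _fence_ctl_signal() below); the entry is released with hw_fence_destroy()
 * or hw_fence_destroy_with_hash().
 */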
  906. static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data,
  907. struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno) {
  908. u64 hash;
  909. if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
  910. context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash))
  911. return -EINVAL;
  912. return 0;
  913. }
  914. int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
  915. struct msm_hw_fence_client *hw_fence_client,
  916. u64 context, u64 seqno)
  917. {
  918. u32 client_id = hw_fence_client->client_id;
  919. struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
  920. int ret = 0;
  921. /* remove hw fence from table*/
  922. if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) {
  923. HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu\n",
  924. client_id, context, seqno);
  925. ret = -EINVAL;
  926. }
  927. return ret;
  928. }
  929. int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
  930. struct msm_hw_fence_client *hw_fence_client, u64 hash)
  931. {
  932. u32 client_id = hw_fence_client->client_id;
  933. struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
  934. struct msm_hw_fence *hw_fence = NULL;
  935. int ret = 0;
  936. hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash);
  937. if (!hw_fence) {
  938. HWFNC_ERR("bad hw fence hash:%llu client:%lu\n", hash, client_id);
  939. return -EINVAL;
  940. }
  941. if (hw_fence->fence_allocator != client_id) {
  942. HWFNC_ERR("client:%lu cannot destroy fence hash:%llu fence_allocator:%lu\n",
  943. client_id, hash, hw_fence->fence_allocator);
  944. return -EINVAL;
  945. }
  946. /* remove hw fence from table*/
  947. if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id,
  948. hw_fence->seq_id)) {
  949. HWFNC_ERR("Fail destroying fence client:%lu ctx:%llu seqno:%llu hash:%llu\n",
  950. client_id, hw_fence->ctx_id, hw_fence->seq_id, hash);
  951. ret = -EINVAL;
  952. }
  953. return ret;
  954. }
  955. static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data,
  956. struct msm_hw_fence_client *hw_fence_client,
  957. struct dma_fence_array *array, u64 *hash, bool create)
  958. {
  959. struct msm_hw_fence *hw_fences_tbl;
  960. struct msm_hw_fence *join_fence = NULL;
  961. u64 context, seqno;
  962. u32 client_id, pending_child_cnt;
  963. /*
  964. * NOTE: For now we are allocating the join fences from the same table as all
  965. * the other fences (i.e. drv_data->hw_fences_tbl), functionally this will work, however,
  966. * this might impact the lookup algorithm, since the "join-fences" are created with the
  967. * context and seqno of a fence-array, and those might not be changing by the client,
  968. * so this will linearly increment the look-up and very likely impact the other fences if
  969. * these join-fences start to fill-up a particular region of the fences global table.
  970. * So we might have to allocate a different table altogether for these join fences.
  971. * However, to do this, just alloc another table and change it here:
  972. */
  973. hw_fences_tbl = drv_data->hw_fences_tbl;
  974. context = array->base.context;
  975. seqno = array->base.seqno;
  976. pending_child_cnt = array->num_fences;
  977. client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID;
  978. if (create) {
  979. /* allocate the fence */
  980. join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
  981. seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash);
  982. if (!join_fence)
  983. HWFNC_ERR("Fail to create join fence client:%lu ctx:%llu seqno:%llu\n",
  984. client_id, context, seqno);
  985. } else {
  986. /* destroy the fence */
  987. if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno))
  988. HWFNC_ERR("Fail destroying join fence client:%lu ctx:%llu seqno:%llu\n",
  989. client_id, context, seqno);
  990. }
  991. return join_fence;
  992. }
  993. struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
  994. struct msm_hw_fence_client *hw_fence_client,
  995. u64 context, u64 seqno, u64 *hash)
  996. {
  997. struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
  998. struct msm_hw_fence *hw_fence;
  999. u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff;
  1000. /* find the hw fence */
  1001. hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
  1002. seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash);
  1003. if (!hw_fence)
  1004. HWFNC_ERR("Fail to find hw fence client:%lu ctx:%llu seqno:%llu\n",
  1005. client_id, context, seqno);
  1006. return hw_fence;
  1007. }
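/*
 * _fence_ctl_signal() delivers a signal to one waiting client: if the fence
 * carries an error and the client registered a fence_error_cb, only the
 * callback is invoked; otherwise the payload is written to the client's Rx
 * queue (when update_rxq is set) and the client's IPCC signal is triggered
 * (when send_ipc is set).
 */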
  1008. static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data,
  1009. struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
  1010. u64 flags, u64 client_data, u32 error)
  1011. {
  1012. u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */
  1013. u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */
  1014. HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash);
  1015. /* Call fence error callback */
  1016. if (error && hw_fence_client->fence_error_cb) {
  1017. hw_fence_utils_fence_error_cb(hw_fence_client, hw_fence->ctx_id, hw_fence->seq_id,
  1018. hash, flags, error);
  1019. } else {
  1020. /* Write to Rx queue */
  1021. if (hw_fence_client->update_rxq)
  1022. hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id,
  1023. hw_fence->seq_id, hash, flags, client_data, error,
  1024. HW_FENCE_RX_QUEUE - 1);
  1025. /* Signal the hw fence now */
  1026. if (hw_fence_client->send_ipc)
  1027. hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id,
  1028. hw_fence_client->ipc_signal_id);
  1029. }
  1030. #if IS_ENABLED(CONFIG_DEBUG_FS)
  1031. if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0
  1032. && hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6)
  1033. process_validation_client_loopback(drv_data, hw_fence_client->client_id);
  1034. #endif /* CONFIG_DEBUG_FS */
  1035. }
  1036. static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data,
  1037. struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array,
  1038. struct msm_hw_fence *join_fence, u64 hash_join_fence)
  1039. {
  1040. struct dma_fence *child_fence;
  1041. struct msm_hw_fence *hw_fence_child;
  1042. int idx, j;
  1043. u64 hash = 0;
  1044. if (!array->fences)
  1045. goto destroy_fence;
  1046. /* cleanup the child-fences from the parent join-fence */
  1047. for (idx = iteration; idx >= 0; idx--) {
  1048. child_fence = array->fences[idx];
  1049. if (!child_fence) {
  1050. HWFNC_ERR("invalid child fence idx:%d\n", idx);
  1051. continue;
  1052. }
  1053. hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
  1054. child_fence->seqno, &hash);
  1055. if (!hw_fence_child) {
  1056. HWFNC_ERR("Cannot cleanup child fence context:%lu seqno:%lu hash:%lu\n",
  1057. child_fence->context, child_fence->seqno, hash);
  1058. /*
  1059. * ideally this should not have happened, but if it did, try to keep
  1060. * cleaning-up other fences after printing the error
  1061. */
  1062. continue;
  1063. }
  1064. /* lock the child while we clean it up from the parent join-fence */
  1065. GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
  1066. for (j = hw_fence_child->parents_cnt; j > 0; j--) {
  1067. if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
  1068. HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n",
  1069. hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
  1070. j = MSM_HW_FENCE_MAX_JOIN_PARENTS;
  1071. }
  1072. if (hw_fence_child->parent_list[j - 1] == hash_join_fence) {
  1073. hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE;
  1074. if (hw_fence_child->parents_cnt)
  1075. hw_fence_child->parents_cnt--;
  1076. /* update memory for the table update */
  1077. wmb();
  1078. }
  1079. }
  1080. GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
  1081. }
  1082. destroy_fence:
  1083. /* destroy join fence */
  1084. _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
  1085. false);
  1086. }
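/*
 * hw_fence_process_fence_array() backs a dma_fence_array with a "join" fence:
 * it allocates the join fence with pending_child_cnt = num_fences, records
 * the caller as a waiter, and then, for every child, either adds the join
 * fence to the child's parent_list (child not yet signaled) or folds the
 * child's error and decrements the pending count (child already signaled).
 * If all children were already signaled the join fence is signaled and
 * released right away; any failure unwinds the partial state through
 * _cleanup_join_and_child_fences().
 */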
  1087. int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
  1088. struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array,
  1089. u64 *hash_join_fence, u64 client_data)
  1090. {
  1091. struct msm_hw_fence *join_fence;
  1092. struct msm_hw_fence *hw_fence_child;
  1093. struct dma_fence *child_fence;
  1094. bool signal_join_fence = false;
  1095. u64 hash;
  1096. int i, ret = 0;
  1097. enum hw_fence_client_data_id data_id;
  1098. if (client_data) {
  1099. data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
  1100. if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
  1101. HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
  1102. client_data, hw_fence_client->client_id_ext);
  1103. return -EINVAL;
  1104. }
  1105. }
  1106. /*
  1107. * Create join fence from the join-fences table,
  1108. * This function initializes:
  1109. * join_fence->pending_child_count = array->num_fences
  1110. */
  1111. join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array,
  1112. hash_join_fence, true);
  1113. if (!join_fence) {
  1114. HWFNC_ERR("cannot alloc hw fence for join fence array\n");
  1115. return -EINVAL;
  1116. }
  1117. /* update this as waiting client of the join-fence */
  1118. GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
  1119. join_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
  1120. GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
  1121. /* Iterate through fences of the array */
  1122. for (i = 0; i < array->num_fences; i++) {
  1123. child_fence = array->fences[i];
  1124. /* Nested fence-arrays are not supported */
  1125. if (to_dma_fence_array(child_fence)) {
  1126. HWFNC_ERR("This is a nested fence, fail!\n");
  1127. ret = -EINVAL;
  1128. goto error_array;
  1129. }
  1130. /* All elements in the fence-array must be hw-fences */
  1131. if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) {
  1132. HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n");
  1133. ret = -EINVAL;
  1134. goto error_array;
  1135. }
  1136. /* Find the HW Fence in the Global Table */
  1137. hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
  1138. child_fence->seqno, &hash);
  1139. if (!hw_fence_child) {
  1140. HWFNC_ERR("Cannot find child fence context:%lu seqno:%lu hash:%lu\n",
  1141. child_fence->context, child_fence->seqno, hash);
  1142. ret = -EINVAL;
  1143. goto error_array;
  1144. }

		/* lock the child fence while linking it to the join-fence */
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
		if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
			/* child fence is already signaled */
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
			join_fence->error |= hw_fence_child->error;

			if (--join_fence->pending_child_cnt == 0)
				signal_join_fence = true;

			/* update memory for the table update */
			wmb();
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
		} else {
			/* child fence is not signaled */
			hw_fence_child->parents_cnt++;

			if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS
					|| hw_fence_child->parents_cnt < 1) {

				/* Max number of parents for a fence exceeded, or counter corrupted */
				HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n",
					hw_fence_child->parents_cnt);
				hw_fence_child->parents_cnt--;

				/* update memory for the table update */
				wmb();

				/* unlock */
				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0);

				ret = -EINVAL;
				goto error_array;
			}

			hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
				*hash_join_fence;

			/* update memory for the table update */
			wmb();
		}
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
	}

	if (client_data)
		join_fence->client_data[data_id] = client_data;

	/* all child fences were already signaled, signal the client now */
	if (signal_join_fence) {

		/* signal the join hw fence */
		_fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0,
			client_data, join_fence->error);
		set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);

		/*
		 * The job of the join-fence is finished since we already signaled it,
		 * so we can delete it now. This can happen when all the fences that
		 * are part of the join-fence are already signaled.
		 */
		_hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence,
			false);
	} else if (!array->num_fences) {
		/*
		 * If we didn't signal the join-fence and the number of fences is not set in
		 * the fence-array, fail here; otherwise the driver would create a join-fence
		 * with no children, which would never be signaled, or an incomplete join-fence.
		 */
		HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n",
			array->base.context, array->base.seqno);
		goto error_array;
	}

	return ret;

error_array:
	_cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
		*hash_join_fence);

	return -EINVAL;
}
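
/*
 * hw_fence_register_wait_client() - look up the hw-fence for the given
 * context/seqno in the global table, record the client in its
 * wait_client_mask (plus the optional client_data and wait timestamp), and,
 * if the fence was already signaled, signal the client right away.
 *
 * Returns 0 on success or -EINVAL on failure.
 */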
int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
	struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
	u64 seqno, u64 *hash, u64 client_data)
{
	struct msm_hw_fence *hw_fence;
	enum hw_fence_client_data_id data_id;

	if (client_data) {
		data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
		if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
			HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
				client_data, hw_fence_client->client_id_ext);
			return -EINVAL;
		}
	}

	/* find the hw fence within the table */
	hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash);
	if (!hw_fence) {
		HWFNC_ERR("Cannot find fence!\n");
		return -EINVAL;
	}

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */

	/* register client in the hw fence */
	hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
	hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;
	if (client_data)
		hw_fence->client_data[data_id] = client_data;

	/* update memory for the table update */
	wmb();

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	/* if the hw fence is already signaled, signal the client */
	if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
		if (fence != NULL)
			set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
		_fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0);
	}

	return 0;
}
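
/*
 * hw_fence_process_fence() - validate that the dma_fence is backed by a
 * hw-fence and register the client as a waiter on it via
 * hw_fence_register_wait_client().
 */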
int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence *fence, u64 *hash, u64 client_data)
{
	int ret = 0;

	if (!drv_data || !hw_fence_client || !fence) {
		HWFNC_ERR("Invalid Input!\n");
		return -EINVAL;
	}

	/* fence must be a hw-fence */
	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
		HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%llx\n", fence->flags);
		return -EINVAL;
	}

	ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
		fence->seqno, hash, client_data);
	if (ret)
		HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);

	return ret;
}
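
/*
 * _signal_all_wait_clients() - signal every client registered in the fence's
 * wait_client_mask with the given error, passing along any client_data stored
 * for that client.
 */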
static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u64 hash, int error)
{
	enum hw_fence_client_id wait_client_id;
	enum hw_fence_client_data_id data_id;
	struct msm_hw_fence_client *hw_fence_wait_client;
	u64 client_data = 0;

	/* signal with an error all the waiting clients for this fence */
	for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) {
		if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
			hw_fence_wait_client = drv_data->clients[wait_client_id];
			if (!hw_fence_wait_client)
				continue;

			data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext);
			if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA)
				client_data = hw_fence->client_data[data_id];

			_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
				hash, 0, client_data, error);
		}
	}
}
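
/*
 * hw_fence_utils_reset_queues() - reset the client queues: the TxQ read-index
 * (and tx_wm when skip_wr_idx is set) is moved up to the last write-index
 * produced by the client, and, when update_rxq is set, the RxQ write-index is
 * moved back to the last read-index consumed by the client.
 */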
void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 rd_idx, wr_idx, lock_idx;

	queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1];
	hfi_header = queue->va_header;

	/* For the client TxQ: set the read-index same as the last write done by the client */
	mb(); /* make sure data is ready before read */
	wr_idx = readl_relaxed(&hfi_header->write_index);
	if (queue->skip_wr_idx)
		hfi_header->tx_wm = wr_idx;
	writel_relaxed(wr_idx, &hfi_header->read_index);
	wmb(); /* make sure the index is updated before continuing */
	HWFNC_DBG_Q("update tx queue %s to match write_index:%lu\n",
		queue->skip_wr_idx ? "read_index=tx_wm" : "read_index", wr_idx);

	/* For the client RxQ: set the write-index same as the last read done by the client */
	if (hw_fence_client->update_rxq) {
		lock_idx = hw_fence_client->client_id - 1;
		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
			HWFNC_ERR("cannot reset rxq, lock for client id:%d exceeds max:%d\n",
				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
			return;
		}
		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);

		/* lock the client rx queue while updating it */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1);

		queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1];
		hfi_header = queue->va_header;

		mb(); /* make sure data is ready before read */
		rd_idx = readl_relaxed(&hfi_header->read_index);
		writel_relaxed(rd_idx, &hfi_header->write_index);
		wmb(); /* make sure the index is updated before continuing */

		/* unlock */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0);

		HWFNC_DBG_Q("update rx queue write_index to match read_index:%lu\n", rd_idx);
	}
}
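
/*
 * hw_fence_utils_cleanup_fence() - clear this client's wait bit on the fence
 * and, if the client is also the fence allocator, signal any remaining
 * waiters (with an error unless MSM_HW_FENCE_RESET_WITHOUT_ERROR is set) and
 * destroy the fence unless MSM_HW_FENCE_RESET_WITHOUT_DESTROY is set.
 */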
int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
	u32 reset_flags)
{
	int ret = 0;
	int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
	if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) {
		HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%d seqno:%d\n",
			hw_fence_client->client_id, hw_fence->ctx_id,
			hw_fence->seq_id);
		hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id);

		/* update memory for the table update */
		wmb();
	}
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	if (hw_fence->fence_allocator == hw_fence_client->client_id) {

		/* if fence is not signaled, signal with error all the waiting clients */
		if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
			_signal_all_wait_clients(drv_data, hw_fence, hash, error);

		if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
			goto skip_destroy;

		ret = hw_fence_destroy(drv_data, hw_fence_client,
			hw_fence->ctx_id, hw_fence->seq_id);
		if (ret) {
			HWFNC_ERR("Error destroying HW fence: ctx:%d seqno:%d\n",
				hw_fence->ctx_id, hw_fence->seq_id);
		}
	}

skip_destroy:
	return ret;
}
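
/*
 * hw_fence_get_client_data_id() - map a hw-fence client id to its slot in the
 * per-fence client_data array; clients that do not carry client_data map to
 * HW_FENCE_MAX_CLIENTS_WITH_DATA.
 */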
enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id)
{
	enum hw_fence_client_data_id data_id;

	switch (client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		data_id = HW_FENCE_CLIENT_DATA_ID_CTX0;
		break;
	case HW_FENCE_CLIENT_ID_VAL0:
		data_id = HW_FENCE_CLIENT_DATA_ID_VAL0;
		break;
	case HW_FENCE_CLIENT_ID_VAL1:
		data_id = HW_FENCE_CLIENT_DATA_ID_VAL1;
		break;
	case HW_FENCE_CLIENT_ID_IPE:
		data_id = HW_FENCE_CLIENT_DATA_ID_IPE;
		break;
	case HW_FENCE_CLIENT_ID_VPU:
		data_id = HW_FENCE_CLIENT_DATA_ID_VPU;
		break;
	default:
		data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA;
		break;
	}

	return data_id;
}