hw_fence_drv_priv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/uaccess.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/* Global atomic lock */
#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)

inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
{
#ifdef HWFENCE_USE_SLEEP_TIMER
	return readl_relaxed(drv_data->qtime_io_mem);
#else /* use QTIMER */
	return arch_timer_read_counter();
#endif /* HWFENCE_USE_SLEEP_TIMER */
}
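/*
 * Illustrative sketch (not authoritative; actual sizes come from the
 * HW_FENCE_HFI_*_SIZE macros and the per-client queue_entries parsed from
 * DT) of the shared-memory region that init_hw_fences_queues() below carves
 * out for a client with two queues:
 *
 *	base (phys/ptr) -> +-------------------------------------+
 *	                   | msm_hw_fence_hfi_queue_table_header |
 *	                   +-------------------------------------+
 *	                   | msm_hw_fence_hfi_queue_header [0]   | TX queue hdr
 *	                   | msm_hw_fence_hfi_queue_header [1]   | RX queue hdr
 *	headers_size ----> +-------------------------------------+
 *	                   | TX queue payload ring (queue_size)  |
 *	+ queue_size ----> +-------------------------------------+
 *	                   | RX queue payload ring (queue_size)  |
 *	                   +-------------------------------------+
 */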
static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve mem_reserve_id,
	struct msm_hw_fence_mem_addr *mem_descriptor,
	struct msm_hw_fence_queue *queues, int queues_num,
	int client_id)
{
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
	void *ptr, *qptr;
	phys_addr_t phys, qphys;
	u32 size, start_queue_offset;
	int headers_size, queue_size, payload_size;
	int i, ret = 0;

	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
	switch (mem_reserve_id) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
		queue_size = drv_data->hw_fence_ctrl_queue_size;
		payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		if (client_id >= drv_data->clients_num) {
			HWFNC_ERR("Invalid client_id: %d\n", client_id);
			return -EINVAL;
		}
		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num);
		queue_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD *
			drv_data->hw_fence_client_queue_size[client_id].queue_entries;
		payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
		break;
	default:
		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
		return -EINVAL;
	}

	/* Reserve virtual and physical memory for the HFI headers */
	ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
	if (ret) {
		HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size; /* bytes */
	mem_descriptor->mem_data = NULL; /* currently we don't need any special info */

	HWFNC_DBG_INIT("Initialize headers\n");

	/* Initialize the headers info within the hfi memory */
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
	hfi_table_header->version = 0;
	hfi_table_header->size = size; /* bytes */

	/* Offset, from the base address, where the first queue header starts */
	hfi_table_header->qhdr0_offset =
		sizeof(struct msm_hw_fence_hfi_queue_table_header);
	hfi_table_header->qhdr_size =
		sizeof(struct msm_hw_fence_hfi_queue_header);
	hfi_table_header->num_q = queues_num; /* number of queues */
	hfi_table_header->num_active_q = queues_num;

	/*
	 * Initialize the queues info within the HFI memory.
	 * The first hfi queue header starts right at the end of the
	 * hfi table header.
	 */
	HWFNC_DBG_INIT("Initialize queues\n");
	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
		((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
	for (i = 0; i < queues_num; i++) {
		HWFNC_DBG_INIT("init queue[%d]\n", i);

		/* Calculate the offset where this queue starts */
		start_queue_offset = headers_size + (i * queue_size); /* bytes */
		qphys = phys + start_queue_offset; /* start of the PA for the queue elements */
		qptr = (char *)ptr + start_queue_offset; /* start of the VA for the queue elements */

		/* Set the physical start address in the HFI queue header */
		hfi_queue_header->start_addr = qphys;

		/* Set the queue type (i.e. RX or TX queue) */
		hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;

		/* Set the size of the queue */
		hfi_queue_header->queue_size = queue_size;

		/* Set the payload size */
		hfi_queue_header->pkt_size = payload_size;

		/* Store the memory info in the client data */
		queues[i].va_queue = qptr;
		queues[i].pa_queue = qphys;
		queues[i].va_header = hfi_queue_header;
		queues[i].q_size_bytes = queue_size;
		HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=%pa hd:0x%pK sz:%u pkt:%d\n",
			hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
			client_id, i, queues[i].va_queue, &queues[i].pa_queue, queues[i].va_header,
			queues[i].q_size_bytes, payload_size);

		/* Next header */
		hfi_queue_header++;
	}

	return ret;
}

static inline bool _lock_client_queue(int queue_type)
{
	/* Only the RX queue needs locking */
	return queue_type == (HW_FENCE_RX_QUEUE - 1);
}

char *_get_queue_type(int queue_type)
{
	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ";
}
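/*
 * Queue-index convention used by hw_fence_read_queue() and
 * hw_fence_update_queue() below: the read_index and write_index stored in
 * the HFI queue header are expressed in u32 words (not bytes, not packets)
 * from the start of the queue ring. For illustration only: if
 * sizeof(struct msm_hw_fence_queue_payload) were 64 bytes and the ring
 * 1024 bytes, then payload_size_u32 = 16 and q_size_u32 = 256, so the
 * indices advance in steps of 16 and wrap back to 0 at 256.
 */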
int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_queue_payload *payload, int queue_type)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 read_idx;
	u32 write_idx;
	u32 to_read_idx;
	u32 *read_ptr;
	u32 payload_size_u32;
	u32 q_size_u32;
	struct msm_hw_fence_queue_payload *read_ptr_payload;

	if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) {
		HWFNC_ERR("Invalid queue type:%d hw_fence_client:0x%pK payload:0x%pK\n", queue_type,
			hw_fence_client, payload);
		return -EINVAL;
	}

	queue = &hw_fence_client->queues[queue_type];
	hfi_header = queue->va_header;

	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
	payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
	HWFNC_DBG_Q("sizeof payload:%zu\n", sizeof(struct msm_hw_fence_queue_payload));

	if (!hfi_header) {
		HWFNC_ERR("Invalid queue\n");
		return -EINVAL;
	}

	/* Make sure data is ready before read */
	mb();

	/* Get read and write index */
	read_idx = readl_relaxed(&hfi_header->read_index);
	write_idx = readl_relaxed(&hfi_header->write_index);
	HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
		hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
		read_idx, write_idx, queue);

	if (read_idx == write_idx) {
		HWFNC_DBG_Q("Nothing to read!\n");
		return 0;
	}

	/* Move the pointer to where we need to read and cast it */
	read_ptr = ((u32 *)queue->va_queue + read_idx);
	read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr;
	HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=%pa read_ptr_payload:0x%pK\n", read_ptr,
		queue->va_queue, &queue->pa_queue, read_ptr_payload);

	/* Calculate the index after the read */
	to_read_idx = read_idx + payload_size_u32;

	/*
	 * Wrap-around case: here we are reading the last element of the queue,
	 * therefore set to_read_idx, which is the index after the read, back
	 * to the beginning of the queue.
	 */
	if (to_read_idx >= q_size_u32)
		to_read_idx = 0;

	/* Read the client queue */
	payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id);
	payload->seqno = readq_relaxed(&read_ptr_payload->seqno);
	payload->hash = readq_relaxed(&read_ptr_payload->hash);
	payload->flags = readq_relaxed(&read_ptr_payload->flags);
	payload->client_data = readq_relaxed(&read_ptr_payload->client_data);
	payload->error = readl_relaxed(&read_ptr_payload->error);

	/* Update the read index */
	writel_relaxed(to_read_idx, &hfi_header->read_index);

	/* Update memory for the index */
	wmb();

	/* Return one if the queue still has contents after the read */
	return to_read_idx == write_idx ? 0 : 1;
}
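/*
 * Return-value convention of hw_fence_read_queue(), with an illustrative
 * (not driver-mandated) caller pattern:
 *
 *	ret = hw_fence_read_queue(client, &payload, HW_FENCE_RX_QUEUE - 1);
 *	if (ret < 0) {
 *		// invalid client or queue
 *	} else if (ret == 1) {
 *		// 'payload' holds an entry and more remain: read again
 *	} else {
 *		// ret == 0: either the queue was already empty (indices were
 *		// equal, 'payload' untouched) or this call consumed the final
 *		// entry; callers that must tell these apart track it themselves
 *	}
 */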
/*
 * This function writes to the queue of the client. The 'queue_type'
 * determines whether it writes to the RX or the TX queue.
 */
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
	u64 flags, u64 client_data, u32 error, int queue_type)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 read_idx;
	u32 write_idx;
	u32 to_write_idx;
	u32 q_size_u32;
	u32 q_free_u32;
	u32 *q_payload_write_ptr;
	u32 payload_size, payload_size_u32;
	struct msm_hw_fence_queue_payload *write_ptr_payload;
	bool lock_client = false;
	u32 lock_idx;
	u64 timestamp;
	u32 *wr_ptr;
	int ret = 0;

	if (queue_type >=
			drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num) {
		HWFNC_ERR("Invalid queue type:%d client_id:%d\n", queue_type,
			hw_fence_client->client_id);
		return -EINVAL;
	}

	queue = &hw_fence_client->queues[queue_type];
	hfi_header = queue->va_header;

	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
	payload_size = sizeof(struct msm_hw_fence_queue_payload);
	payload_size_u32 = (payload_size / sizeof(u32));

	if (!hfi_header) {
		HWFNC_ERR("Invalid queue\n");
		return -EINVAL;
	}

	/* if skipping the txq wr_index update, use hfi_header->tx_wm instead */
	if (queue_type == (HW_FENCE_TX_QUEUE - 1) && hw_fence_client->skip_txq_wr_idx)
		wr_ptr = &hfi_header->tx_wm;
	else
		wr_ptr = &hfi_header->write_index;

	/*
	 * We need to lock the client if there is an RX queue update, since that
	 * is the only time the HW fence driver can race while updating the RX
	 * queue, which could also be getting updated by the Fence CTL.
	 */
	lock_client = _lock_client_queue(queue_type);
	if (lock_client) {
		lock_idx = hw_fence_client->client_id - 1;

		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
			HWFNC_ERR("lock for client id:%d exceeds max:%d\n",
				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
			return -EINVAL;
		}
		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);

		/* lock the client rx queue to update */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */
	}

	/* Make sure data is ready before read */
	mb();

	/* Get read and write index */
	read_idx = readl_relaxed(&hfi_header->read_index);
	write_idx = readl_relaxed(wr_ptr);

	HWFNC_DBG_Q("wr client:%d r_ptr:0x%pK w_ptr:0x%pK r_idx:%d w_idx:%d q:0x%pK type:%d s:%s\n",
		hw_fence_client->client_id, &hfi_header->read_index, wr_ptr,
		read_idx, write_idx, queue, queue_type,
		hw_fence_client->skip_txq_wr_idx ? "true" : "false");

	/* Check the queue to make sure the message will fit */
	q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
		(read_idx - write_idx);
	if (q_free_u32 <= payload_size_u32) {
		HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32);
		ret = -EINVAL;
		goto exit;
	}
	HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32);

	/* Move the pointer to where we need to write and cast it */
	q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx);
	write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr;
	HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=%pa write_ptr_payload:0x%pK\n",
		q_payload_write_ptr, queue->va_queue, &queue->pa_queue, write_ptr_payload);

	/* Calculate the index after the write */
	to_write_idx = write_idx + payload_size_u32;

	HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx,
		payload_size_u32);
	HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n",
		hw_fence_client->client_id, _get_queue_type(queue_type),
		hash, ctxt_id, seqno, flags, error);

	/*
	 * Wrap-around case: here we are writing to the last element of the
	 * queue, therefore set to_write_idx, which is the index after the
	 * write, back to the beginning of the queue.
	 */
	if (to_write_idx >= q_size_u32)
		to_write_idx = 0;

	/* Update the client queue */
	writeq_relaxed(payload_size, &write_ptr_payload->size);
	writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
	writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
	writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
	writeq_relaxed(seqno, &write_ptr_payload->seqno);
	writeq_relaxed(hash, &write_ptr_payload->hash);
	writeq_relaxed(flags, &write_ptr_payload->flags);
	writeq_relaxed(client_data, &write_ptr_payload->client_data);
	writel_relaxed(error, &write_ptr_payload->error);
	timestamp = hw_fence_get_qtime(drv_data);
	writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
	writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);

	/* Update memory for the message */
	wmb();

	/* Update the write index */
	writel_relaxed(to_write_idx, wr_ptr);

	/* Update memory for the index */
	wmb();

exit:
	if (lock_client)
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */

	return ret;
}
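/*
 * Worked example for the q_free_u32 computation above (illustrative numbers
 * only): with q_size_u32 = 256 and payload_size_u32 = 16, a writer at
 * write_idx = 240 with read_idx = 0 sees q_free_u32 = 256 - (240 - 0) = 16,
 * which is not strictly greater than payload_size_u32, so the write is
 * rejected. Rejecting at "free == payload" keeps one payload's worth of
 * slack, so a full ring never degenerates into the empty-ring condition
 * (read_idx == write_idx).
 */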
static int init_global_locks(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	phys_addr_t phys;
	void *ptr;
	u32 size;
	int ret;

	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr,
		&size, 0);
	if (ret) {
		HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor = &drv_data->clients_locks_mem_desc;
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size;
	mem_descriptor->mem_data = NULL; /* not storing special info for now */

	/* Initialize the internal pointers for managing the tables */
	drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr;
	drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64);

	return 0;
}

static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	phys_addr_t phys;
	void *ptr;
	u32 size;
	int ret;

	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr,
		&size, 0);
	if (ret) {
		HWFNC_ERR("Failed to reserve table mem %d\n", ret);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor = &drv_data->hw_fences_mem_desc;
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size;
	mem_descriptor->mem_data = NULL; /* not storing special info for now */

	/* Initialize the internal pointers for managing the tables */
	drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr;
	drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size /
		sizeof(struct msm_hw_fence);

	HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl,
		drv_data->hw_fences_tbl_cnt);

	return 0;
}

static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	int ret;

	mem_descriptor = &drv_data->ctrl_queue_mem_desc;

	/* Init the ctrl queue */
	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
		mem_descriptor, drv_data->ctrl_queues,
		HW_FENCE_CTRL_QUEUES, 0);
	if (ret)
		HWFNC_ERR("Failure to init ctrl queue\n");

	return ret;
}

int hw_fence_init(struct hw_fence_driver_data *drv_data)
{
	int ret;
	__le32 *mem;

	ret = hw_fence_utils_parse_dt_props(drv_data);
	if (ret) {
		HWFNC_ERR("failed to set dt properties\n");
		goto exit;
	}

	/* Allocate the hw fence driver mem pool and share it with the HYP */
	ret = hw_fence_utils_alloc_mem(drv_data);
	if (ret) {
		HWFNC_ERR("failed to alloc base memory\n");
		goto exit;
	}

	/* Initialize the ctrl queue */
	ret = init_ctrl_queue(drv_data);
	if (ret)
		goto exit;

	ret = init_global_locks(drv_data);
	if (ret)
		goto exit;
	HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl,
		drv_data->client_lock_tbl_cnt);

	/* Initialize the hw fences table */
	ret = init_hw_fences_table(drv_data);
	if (ret)
		goto exit;

	/* Map the ipcc registers */
	ret = hw_fence_utils_map_ipcc(drv_data);
	if (ret) {
		HWFNC_ERR("ipcc regs mapping failed\n");
		goto exit;
	}

	/* Map the time register */
	ret = hw_fence_utils_map_qtime(drv_data);
	if (ret) {
		HWFNC_ERR("qtime reg mapping failed\n");
		goto exit;
	}

	/* Map the ctl_start registers */
	ret = hw_fence_utils_map_ctl_start(drv_data);
	if (ret) {
		/*
		 * This is not a fatal error, since platforms with dpu-ipc
		 * won't use this option.
		 */
		HWFNC_WARN("no ctl_start regs, won't trigger the frame\n");
	}

	/* Init debugfs */
	ret = hw_fence_debug_debugfs_register(drv_data);
	if (ret) {
		HWFNC_ERR("debugfs init failed\n");
		goto exit;
	}

	/* Init the vIRQ from the VM */
	ret = hw_fence_utils_init_virq(drv_data);
	if (ret) {
		HWFNC_ERR("failed to init virq\n");
		goto exit;
	}

	mem = drv_data->io_mem_base;
	HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem);

	HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n",
		drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt);

exit:
	return ret;
}
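/*
 * Summary (derived from the code above, not new behavior): hw_fence_init()
 * is the one-time, per-device bring-up in the order
 * DT parse -> carveout alloc -> ctrl queue -> global locks -> fence table ->
 * ipcc/qtime/ctl_start mappings -> debugfs -> vIRQ. The functions that
 * follow are the per-client counterparts: queue allocation, IPCC signal
 * setup, and teardown.
 */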
int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_mem_addr *mem_descriptor)
{
	int ret;

	/* Init the client queues */
	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
		&hw_fence_client->mem_descriptor, hw_fence_client->queues,
		drv_data->hw_fence_client_queue_size[hw_fence_client->client_id].queues_num,
		hw_fence_client->client_id);
	if (ret) {
		HWFNC_ERR("Failure to init the queue for client:%d\n",
			hw_fence_client->client_id);
		goto exit;
	}

	/* Init the client memory descriptor */
	memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
		sizeof(struct msm_hw_fence_mem_addr));

exit:
	return ret;
}

int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	int ret = 0;

	/*
	 * Initialize the IPCC signals for this client.
	 *
	 * NOTE: For each client HW core, the client driver might do its own
	 * initialization (in case any hw sequence must be enforced); however,
	 * if that is not the case, any per-client ipcc init to enable the
	 * signaling can go here.
	 */
	switch ((int)hw_fence_client->client_id_ext) {
	case HW_FENCE_CLIENT_ID_CTX0:
		/* nothing to initialize for the gpu client */
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_CLIENT_ID_VAL0:
	case HW_FENCE_CLIENT_ID_VAL1:
	case HW_FENCE_CLIENT_ID_VAL2:
	case HW_FENCE_CLIENT_ID_VAL3:
	case HW_FENCE_CLIENT_ID_VAL4:
	case HW_FENCE_CLIENT_ID_VAL5:
	case HW_FENCE_CLIENT_ID_VAL6:
		/* nothing to initialize for the validation clients */
		break;
#endif /* CONFIG_DEBUG_FS */
	case HW_FENCE_CLIENT_ID_CTL0:
	case HW_FENCE_CLIENT_ID_CTL1:
	case HW_FENCE_CLIENT_ID_CTL2:
	case HW_FENCE_CLIENT_ID_CTL3:
	case HW_FENCE_CLIENT_ID_CTL4:
	case HW_FENCE_CLIENT_ID_CTL5:
#ifdef HW_DPU_IPCC
		/* initialize the ipcc signals for the dpu clients */
		HWFNC_DBG_H("init_controller_signal: DPU client_id_ext:%d initialized:%d\n",
			hw_fence_client->client_id_ext, drv_data->ipcc_dpu_initialized);
		if (!drv_data->ipcc_dpu_initialized) {
			drv_data->ipcc_dpu_initialized = true;

			/* Init the dpu client ipcc signal */
			hw_fence_ipcc_enable_dpu_signaling(drv_data);
		}
#endif /* HW_DPU_IPCC */
		break;
	case HW_FENCE_CLIENT_ID_IPE ... HW_FENCE_CLIENT_ID_IPE +
			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
		/* nothing to initialize for the IPE client */
		break;
	case HW_FENCE_CLIENT_ID_VPU ... HW_FENCE_CLIENT_ID_VPU +
			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
		/* nothing to initialize for the VPU client */
		break;
	case HW_FENCE_CLIENT_ID_IFE0 ... HW_FENCE_CLIENT_ID_IFE7 +
			MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - 1:
		/* nothing to initialize for the IFE clients */
		break;
	default:
		HWFNC_ERR("Unexpected client_id_ext:%d\n", hw_fence_client->client_id_ext);
		ret = -EINVAL;
		break;
	}

	return ret;
}

int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
{
	/*
	 * Initialize the Fence Controller resources for this client.
	 * Here we would use the CTRL queue to communicate to the Fence
	 * Controller the shared memory for this client's Rx/Tx queues,
	 * as well as any other information the Fence Controller might
	 * need to know about this client.
	 *
	 * NOTE: For now, we are doing a static allocation of the client's
	 * queues, so currently we don't need any notification to the Fence
	 * CTL here through the CTRL queue. Later on we might need it, once
	 * PVM-to-SVM (and vice versa) communication for initialization is
	 * supported.
	 */

	return 0;
}

void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	/*
	 * Deallocate any resource allocated for this client.
	 * If the fence controller was notified about the existence of this
	 * client, we will need to notify the fence controller that this
	 * client is gone.
	 *
	 * NOTE: Since the clients' queues currently come from 'fixed' memory,
	 * we don't need any notification to the Fence Controller yet; however,
	 * if the memory allocation moves from 'fixed' to dynamic, we will need
	 * to notify the FenceCTL here about the client that is going away.
	 */
	mutex_lock(&drv_data->clients_register_lock);
	drv_data->clients[hw_fence_client->client_id] = NULL;
	mutex_unlock(&drv_data->clients_register_lock);

	/* Deallocate the client's object */
	HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
	kfree(hw_fence_client);
}

static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
	u64 step, u64 *hash)
{
	u64 m_size = table_total_entries;
	int val = 0;

	if (step == 0) {
		u64 a_multiplier = HW_FENCE_HASH_A_MULT;
		u64 c_multiplier = HW_FENCE_HASH_C_MULT;
		u64 b_multiplier = context + (context - 1); /* odd multiplier */

		/*
		 * If m is a power of 2, we could optimize with a right shift;
		 * for now we don't, to avoid assuming a power of two.
		 */
		*hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
	} else {
		if (step >= m_size) {
			/*
			 * If we already traversed the whole table, return failure, since
			 * this means there are no available spots: the table is either
			 * full, or full enough that we couldn't find an available spot
			 * after traversing the whole table. Ideally the table shouldn't
			 * be so full that we cannot find a value after some iterations,
			 * so this maximum step size could be optimized to fail earlier.
			 */
			HWFNC_ERR("Fence table traversed and no available space!\n");
			val = -EINVAL;
		} else {
			/*
			 * Linearly increment the hash value to find the next element in
			 * the table. Note that this relies on the 'scrambled' data from
			 * the original hash. Also, use a mod division to wrap around in
			 * case we reached the end of the table.
			 */
			*hash = (*hash + 1) % m_size;
		}
	}

	return val;
}
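/*
 * Worked probe sequence for _calculate_hash() (illustrative numbers only;
 * the real multipliers come from HW_FENCE_HASH_A_MULT/HW_FENCE_HASH_C_MULT
 * in the driver headers): for m_size = 8, context = 3, seqno = 5, and
 * made-up multipliers a = 4969, c = 907,
 *
 *	step 0: hash = (4969 * 5 * 5 + 907 * 3) % 8	(b = 2*3 - 1 = 5)
 *	step 1: hash = (hash + 1) % 8			(linear probing)
 *	step n: fail with -EINVAL once n >= 8		(table traversed)
 *
 * i.e. one multiplicative hash up front, then open addressing with linear
 * probing until a slot matches or the table has been fully traversed.
 */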
static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
	struct msm_hw_fence *hw_fences_tbl,
	u64 hash)
{
	if (hash >= table_total_entries) {
		HWFNC_ERR("hash:%llu out of max range:%u\n",
			hash, table_total_entries);
		return NULL;
	}

	return &hw_fences_tbl[hash];
}

static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
	/* If the valid flag is set, the hw fence is not free */
	return !hw_fence->valid;
}

static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
	return hw_fence->ctx_id == context && hw_fence->seq_id == seqno;
}

/* clears everything but the 'valid' field */
static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence)
{
	int i;

	hw_fence->error = 0;
	wmb(); /* update memory to avoid mem-abort */
	hw_fence->ctx_id = 0;
	hw_fence->seq_id = 0;
	hw_fence->wait_client_mask = 0;
	hw_fence->fence_allocator = 0;
	hw_fence->fence_signal_client = 0;
	hw_fence->flags = 0;

	hw_fence->fence_create_time = 0;
	hw_fence->fence_trigger_time = 0;
	hw_fence->fence_wait_time = 0;
	hw_fence->debug_refcount = 0;
	hw_fence->parents_cnt = 0;
	hw_fence->pending_child_cnt = 0;

	for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++)
		hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE;

	memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data));
}

/* This function must be called with the hw fence lock held */
static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* reserve this HW fence */
	hw_fence->valid = 1;

	hw_fence->ctx_id = context;
	hw_fence->seq_id = seqno;
	hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */
	hw_fence->fence_allocator = client_id;
	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;

	HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock held */
static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* unreserve this HW fence */
	hw_fence->valid = 0;

	HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock held */
static void _reserve_join_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id, u64 context,
	u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* reserve this HW fence */
	hw_fence->valid = true;
	hw_fence->ctx_id = context;
	hw_fence->seq_id = seqno;
	hw_fence->fence_allocator = client_id;
	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;

	hw_fence->pending_child_cnt = pending_child_cnt;

	HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock held */
static void _fence_found(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	/*
	 * Do nothing: when this find-fence function is invoked, all processing
	 * is done outside. Currently just keeping this function for debugging
	 * purposes; it can be removed in final versions.
	 */
	HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

char *_get_op_mode(enum hw_fence_lookup_ops op_code)
{
	switch (op_code) {
	case HW_FENCE_LOOKUP_OP_CREATE:
		return "CREATE";
	case HW_FENCE_LOOKUP_OP_DESTROY:
		return "DESTROY";
	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
		return "CREATE_JOIN";
	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
		return "FIND_FENCE";
	default:
		return "UNKNOWN";
	}
}
struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id,
	u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash)
{
	bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno);
	void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence,
		u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending);
	struct msm_hw_fence *hw_fence = NULL;
	u64 step = 0;
	int ret = 0;
	bool hw_fence_found = false;

	if (!hash || !drv_data || !hw_fences_tbl) {
		HWFNC_ERR("Invalid input for hw_fence_lookup\n");
		return NULL;
	}

	*hash = ~0;

	HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code);

	switch (op_code) {
	case HW_FENCE_LOOKUP_OP_CREATE:
		compare_fnc = &_is_hw_fence_free;
		process_fnc = &_reserve_hw_fence;
		break;
	case HW_FENCE_LOOKUP_OP_DESTROY:
		compare_fnc = &_hw_fence_match;
		process_fnc = &_unreserve_hw_fence;
		break;
	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
		compare_fnc = &_is_hw_fence_free;
		process_fnc = &_reserve_join_fence;
		break;
	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
		compare_fnc = &_hw_fence_match;
		process_fnc = &_fence_found;
		break;
	default:
		HWFNC_ERR("Unknown op code:%d\n", op_code);
		return NULL;
	}

	while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) {

		/* Calculate the hash for the fence */
		ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash);
		if (ret) {
			HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n",
				context, seqno, *hash);
			break;
		}
		HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context,
			seqno);

		/* Get the element from the table using the hash */
		hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash);
		HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n",
			hw_fences_tbl, hw_fence, *hash, hw_fence ? hw_fence->valid : 0xbad);
		if (!hw_fence) {
			HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n",
				context, seqno, *hash);
			break;
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1);

		/* compare to either find a free fence or find an allocated fence */
		if (compare_fnc(hw_fence, context, seqno)) {

			/* Process the hw fence found by the algorithm */
			if (process_fnc) {
				process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash,
					pending_child_cnt);

				/* update memory table with processing */
				wmb();
			}

			HWFNC_DBG_L("client_id:%u op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n",
				client_id, _get_op_mode(op_code), context, seqno, *hash, step);

			hw_fence_found = true;
		} else {
			if ((op_code == HW_FENCE_LOOKUP_OP_CREATE ||
					op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) &&
					seqno == hw_fence->seq_id && context == hw_fence->ctx_id) {
				/* ctx & seqno must be unique when creating a hw fence */
				HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n",
					context, seqno);
				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
				break;
			}
			/* compare can fail if we have a collision; we resolve it linearly */
			HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash,
				context, seqno);
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);

		/* Increment the step for the next loop */
		step++;
	}

	/* If we iterated through the whole table and didn't find the fence, return NULL */
	if (!hw_fence_found) {
		HWFNC_ERR("fail to create hw-fence step:%llu\n", step);
		hw_fence = NULL;
	}

	HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n",
		op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1);

	return hw_fence;
}

int hw_fence_create(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash)
{
	u32 client_id = hw_fence_client->client_id;
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	int ret = 0;

	/* allocate the hw fence in the table */
	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
			context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) {
		HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);
		ret = -EINVAL;
	}

	return ret;
}

static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno)
{
	u64 hash;

	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
			context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash))
		return -EINVAL;

	return 0;
}
int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno)
{
	u32 client_id = hw_fence_client->client_id;
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	int ret = 0;

	/* remove the hw fence from the table */
	if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) {
		HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);
		ret = -EINVAL;
	}

	return ret;
}

int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, u64 hash)
{
	u32 client_id = hw_fence_client->client_id;
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	struct msm_hw_fence *hw_fence = NULL;
	int ret = 0;

	hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash);
	if (!hw_fence) {
		HWFNC_ERR("bad hw fence hash:%llu client:%u\n", hash, client_id);
		return -EINVAL;
	}

	if (hw_fence->fence_allocator != client_id) {
		HWFNC_ERR("client:%u cannot destroy fence hash:%llu fence_allocator:%u\n",
			client_id, hash, hw_fence->fence_allocator);
		return -EINVAL;
	}

	/* remove the hw fence from the table */
	if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id,
			hw_fence->seq_id)) {
		HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu hash:%llu\n",
			client_id, hw_fence->ctx_id, hw_fence->seq_id, hash);
		ret = -EINVAL;
	}

	return ret;
}

static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence_array *array, u64 *hash, bool create)
{
	struct msm_hw_fence *hw_fences_tbl;
	struct msm_hw_fence *join_fence = NULL;
	u64 context, seqno;
	u32 client_id, pending_child_cnt;

	/*
	 * NOTE: For now we are allocating the join fences from the same table as
	 * all the other fences (i.e. drv_data->hw_fences_tbl). Functionally this
	 * will work; however, it might impact the lookup algorithm, since the
	 * "join fences" are created with the context and seqno of a fence-array,
	 * and those might not be changed by the client, so the lookup will keep
	 * incrementing linearly and very likely impact the other fences if these
	 * join fences start to fill up a particular region of the global fences
	 * table. So we might have to allocate a different table altogether for
	 * these join fences; to do that, just allocate another table and change
	 * it here:
	 */
	hw_fences_tbl = drv_data->hw_fences_tbl;

	context = array->base.context;
	seqno = array->base.seqno;
	pending_child_cnt = array->num_fences;
	client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID;

	if (create) {
		/* allocate the fence */
		join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
			seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash);
		if (!join_fence)
			HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n",
				client_id, context, seqno);
	} else {
		/* destroy the fence */
		if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno))
			HWFNC_ERR("Fail destroying join fence client:%u ctx:%llu seqno:%llu\n",
				client_id, context, seqno);
	}

	return join_fence;
}
struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash)
{
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	struct msm_hw_fence *hw_fence;
	u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff;

	/* find the hw fence */
	hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
		seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash);
	if (!hw_fence)
		HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);

	return hw_fence;
}

static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
	u64 flags, u64 client_data, u32 error)
{
	u32 tx_client_id = drv_data->ipcc_client_pid; /* physical id for the tx client */
	u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virtual id for the rx client */

	HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash);

	/* Write to the Rx queue */
	if (hw_fence_client->update_rxq)
		hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id,
			hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1);

	/* Signal the hw fence now */
	if (hw_fence_client->send_ipc)
		hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id,
			hw_fence_client->ipc_signal_id);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (hw_fence_client->client_id >= HW_FENCE_CLIENT_ID_VAL0
			&& hw_fence_client->client_id <= HW_FENCE_CLIENT_ID_VAL6)
		process_validation_client_loopback(drv_data, hw_fence_client->client_id);
#endif /* CONFIG_DEBUG_FS */
}

static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array,
	struct msm_hw_fence *join_fence, u64 hash_join_fence)
{
	struct dma_fence *child_fence;
	struct msm_hw_fence *hw_fence_child;
	int idx, j;
	u64 hash = 0;

	if (!array->fences)
		goto destroy_fence;

	/* cleanup the child fences from the parent join fence */
	for (idx = iteration; idx >= 0; idx--) {
		child_fence = array->fences[idx];
		if (!child_fence) {
			HWFNC_ERR("invalid child fence idx:%d\n", idx);
			continue;
		}

		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
			child_fence->seqno, &hash);
		if (!hw_fence_child) {
			HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n",
				child_fence->context, child_fence->seqno, hash);

			/*
			 * ideally this should not have happened, but if it did, try to
			 * keep cleaning up the other fences after printing the error
			 */
			continue;
		}

		/* lock the child while we clean it up from the parent join fence */
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
		for (j = hw_fence_child->parents_cnt; j > 0; j--) {

			if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
				HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n",
					hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
				j = MSM_HW_FENCE_MAX_JOIN_PARENTS;
			}

			if (hw_fence_child->parent_list[j - 1] == hash_join_fence) {
				hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE;

				if (hw_fence_child->parents_cnt)
					hw_fence_child->parents_cnt--;

				/* update memory for the table update */
				wmb();
			}
		}
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
	}

destroy_fence:
	/* destroy the join fence */
	_hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
		false);
}
int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array,
	u64 *hash_join_fence, u64 client_data)
{
	struct msm_hw_fence *join_fence;
	struct msm_hw_fence *hw_fence_child;
	struct dma_fence *child_fence;
	bool signal_join_fence = false;
	u64 hash;
	int i, ret = 0;
	enum hw_fence_client_data_id data_id;

	if (client_data) {
		data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
		if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
			HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
				client_data, hw_fence_client->client_id_ext);
			return -EINVAL;
		}
	}

	/*
	 * Create the join fence from the join-fences table.
	 * This function initializes:
	 * join_fence->pending_child_count = array->num_fences
	 */
	join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array,
		hash_join_fence, true);
	if (!join_fence) {
		HWFNC_ERR("cannot alloc hw fence for join fence array\n");
		return -EINVAL;
	}

	/* update this client as a waiting client of the join fence */
	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
	join_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */

	/* Iterate through the fences of the array */
	for (i = 0; i < array->num_fences; i++) {
		child_fence = array->fences[i];

		/* Nested fence arrays are not supported */
		if (to_dma_fence_array(child_fence)) {
			HWFNC_ERR("This is a nested fence, fail!\n");
			ret = -EINVAL;
			goto error_array;
		}

		/* All elements in the fence array must be hw fences */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) {
			HWFNC_ERR("DMA fence in fence array is not a HW fence\n");
			ret = -EINVAL;
			goto error_array;
		}

		/* Find the HW fence in the global table */
		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
			child_fence->seqno, &hash);
		if (!hw_fence_child) {
			HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n",
				child_fence->context, child_fence->seqno, hash);
			ret = -EINVAL;
			goto error_array;
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
		if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) {

			/* child fence is already signaled */
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
			if (--join_fence->pending_child_cnt == 0)
				signal_join_fence = true;

			/* update memory for the table update */
			wmb();
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
		} else {

			/* child fence is not signaled */
			hw_fence_child->parents_cnt++;

			if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS
					|| hw_fence_child->parents_cnt < 1) {

				/* Max number of parents for a fence is exceeded */
				HWFNC_ERR("DMA fence in fence array exceeds parents:%d\n",
					hw_fence_child->parents_cnt);
				hw_fence_child->parents_cnt--;

				/* update memory for the table update */
				wmb();

				/* unlock */
				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0);
				ret = -EINVAL;
				goto error_array;
			}

			hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
				*hash_join_fence;

			/* update memory for the table update */
			wmb();
		}
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
	}

	if (client_data)
		join_fence->client_data[data_id] = client_data;

	/* all fences were signaled, signal the client now */
	if (signal_join_fence) {

		/* signal the join hw fence */
		_fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0,
			client_data, 0);
		set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);

		/*
		 * The job of the join fence is finished since we already signaled it,
		 * so we can delete it now. This can happen when all the fences that
		 * are part of the join fence are already signaled.
		 */
		_hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence,
			false);
	} else if (!array->num_fences) {

		/*
		 * If we didn't signal the join fence and the number of fences is not
		 * set in the fence array, then fail here; otherwise the driver would
		 * create a join fence with no children that would never be signaled
		 * at all, or an incomplete join fence.
		 */
		HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n",
			array->base.context, array->base.seqno);
		goto error_array;
	}

	return ret;

error_array:
	_cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
		*hash_join_fence);

	return -EINVAL;
}
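/*
 * Join-fence lifecycle, summarizing the code above (not new behavior):
 *
 *	1. hw_fence_process_fence_array() reserves a join fence whose
 *	   pending_child_cnt starts at array->num_fences.
 *	2. Each already-signaled child decrements that count immediately;
 *	   each unsignaled child instead records the join fence's hash in
 *	   its parent_list[].
 *	3. If the count reaches zero here (all children were already
 *	   signaled), the join fence is signaled and destroyed right away;
 *	   otherwise it is signaled later, when the last pending child
 *	   signals (handled outside this function).
 *	4. On any error, _cleanup_join_and_child_fences() walks the children
 *	   processed so far, unlinks the join fence from their parent_list[],
 *	   and destroys the join fence.
 */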
int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
	struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
	u64 seqno, u64 *hash, u64 client_data)
{
	struct msm_hw_fence *hw_fence;
	enum hw_fence_client_data_id data_id;

	if (client_data) {
		data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
		if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
			HWFNC_ERR("Populating client_data:%llu with invalid client_id_ext:%d\n",
				client_data, hw_fence_client->client_id_ext);
			return -EINVAL;
		}
	}

	/* find the hw fence within the table */
	hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash);
	if (!hw_fence) {
		HWFNC_ERR("Cannot find fence!\n");
		return -EINVAL;
	}

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */

	/* register the client in the hw fence */
	hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
	hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;
	if (client_data)
		hw_fence->client_data[data_id] = client_data;

	/* update memory for the table update */
	wmb();
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	/* if the hw fence is already signaled, signal the client */
	if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
		if (fence != NULL)
			set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
		_fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0);
	}

	return 0;
}

int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence *fence, u64 *hash, u64 client_data)
{
	int ret = 0;

	if (!drv_data || !hw_fence_client || !fence) {
		HWFNC_ERR("Invalid input!\n");
		return -EINVAL;
	}

	/* the fence must be a hw fence */
	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
		HWFNC_ERR("DMA fence is not a HW fence, flags:0x%llx\n", fence->flags);
		return -EINVAL;
	}

	ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
		fence->seqno, hash, client_data);
	if (ret)
		HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);

	return ret;
}
static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u64 hash, int error)
{
	enum hw_fence_client_id wait_client_id;
	enum hw_fence_client_data_id data_id;
	struct msm_hw_fence_client *hw_fence_wait_client;
	u64 client_data = 0;

	/* signal with an error all the waiting clients for this fence */
	for (wait_client_id = 0; wait_client_id <= drv_data->rxq_clients_num; wait_client_id++) {
		if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
			hw_fence_wait_client = drv_data->clients[wait_client_id];
			if (!hw_fence_wait_client)
				continue;

			data_id = hw_fence_get_client_data_id(hw_fence_wait_client->client_id_ext);
			if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA)
				client_data = hw_fence->client_data[data_id];

			_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
				hash, 0, client_data, error);
		}
	}
}

void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 rd_idx, wr_idx, lock_idx;

	queue = &hw_fence_client->queues[HW_FENCE_TX_QUEUE - 1];
	hfi_header = queue->va_header;

	/* For the client TxQ: set the read-index to the last write done by the client */
	mb(); /* make sure data is ready before read */
	wr_idx = readl_relaxed(&hfi_header->write_index);
	writel_relaxed(wr_idx, &hfi_header->read_index);
	wmb(); /* make sure data is updated after writing the index */

	/* For the client RxQ: set the write-index to the last read done by the client */
	if (hw_fence_client->update_rxq) {
		lock_idx = hw_fence_client->client_id - 1;
		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
			HWFNC_ERR("cannot reset rxq, lock for client id:%d exceeds max:%d\n",
				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
			return;
		}
		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);

		/* lock the client rx queue to update */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1);

		queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1];
		hfi_header = queue->va_header;

		mb(); /* make sure data is ready before read */
		rd_idx = readl_relaxed(&hfi_header->read_index);
		writel_relaxed(rd_idx, &hfi_header->write_index);
		wmb(); /* make sure data is updated after writing the index */

		/* unlock */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0);
	}
}
int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
	u32 reset_flags)
{
	int ret = 0;
	int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
	if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) {
		HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n",
			hw_fence_client->client_id, hw_fence->ctx_id,
			hw_fence->seq_id);
		hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id);

		/* update memory for the table update */
		wmb();
	}
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	if (hw_fence->fence_allocator == hw_fence_client->client_id) {

		/* if the fence is not signaled, signal with error all the waiting clients */
		if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
			_signal_all_wait_clients(drv_data, hw_fence, hash, error);

		if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
			goto skip_destroy;

		ret = hw_fence_destroy(drv_data, hw_fence_client,
			hw_fence->ctx_id, hw_fence->seq_id);
		if (ret) {
			HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n",
				hw_fence->ctx_id, hw_fence->seq_id);
		}
	}

skip_destroy:
	return ret;
}

enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id)
{
	enum hw_fence_client_data_id data_id;

	switch (client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		data_id = HW_FENCE_CLIENT_DATA_ID_CTX0;
		break;
	case HW_FENCE_CLIENT_ID_VAL0:
		data_id = HW_FENCE_CLIENT_DATA_ID_VAL0;
		break;
	case HW_FENCE_CLIENT_ID_VAL1:
		data_id = HW_FENCE_CLIENT_DATA_ID_VAL1;
		break;
	case HW_FENCE_CLIENT_ID_IPE:
		data_id = HW_FENCE_CLIENT_DATA_ID_IPE;
		break;
	case HW_FENCE_CLIENT_ID_VPU:
		data_id = HW_FENCE_CLIENT_DATA_ID_VPU;
		break;
	default:
		data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA;
		break;
	}

	return data_id;
}
  1296. }