hw_fence_drv_priv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/uaccess.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/* Global atomic lock */
#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
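
/*
 * Convention used throughout this file (inferred from the call-sites below):
 * GLOBAL_ATOMIC_STORE(drv_data, &lock, 1) acquires the inter-VM lock and
 * GLOBAL_ATOMIC_STORE(drv_data, &lock, 0) releases it, so each 1/0 pair
 * brackets a critical section over memory shared with the Fence Controller.
 */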

inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
{
#ifdef HWFENCE_USE_SLEEP_TIMER
	return readl_relaxed(drv_data->qtime_io_mem);
#else /* USE QTIMER */
	return arch_timer_read_counter();
#endif /* HWFENCE_USE_SLEEP_TIMER */
}

static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
	enum hw_fence_mem_reserve mem_reserve_id,
	struct msm_hw_fence_mem_addr *mem_descriptor,
	struct msm_hw_fence_queue *queues, int queues_num,
	int client_id)
{
	struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
	struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
	void *ptr, *qptr;
	phys_addr_t phys, qphys;
	u32 size, start_queue_offset;
	int headers_size, queue_size, payload_size;
	int i, ret = 0;

	HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
	switch (mem_reserve_id) {
	case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
		headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
		queue_size = drv_data->hw_fence_ctrl_queue_size;
		payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
		break;
	case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
		headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE;
		queue_size = drv_data->hw_fence_client_queue_size;
		payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
		break;
	default:
		HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
		return -EINVAL;
	}

	/* Reserve virtual and physical memory for the HFI headers */
	ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
	if (ret) {
		HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size; /* bytes */
	mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */

	HWFNC_DBG_INIT("Initialize headers\n");
	/* Initialize headers info within the hfi memory */
	hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
	hfi_table_header->version = 0;
	hfi_table_header->size = size; /* bytes */
	/* Offset, from the base address, where the first queue header starts */
	hfi_table_header->qhdr0_offset =
		sizeof(struct msm_hw_fence_hfi_queue_table_header);
	hfi_table_header->qhdr_size =
		sizeof(struct msm_hw_fence_hfi_queue_header);
	hfi_table_header->num_q = queues_num; /* number of queues */
	hfi_table_header->num_active_q = queues_num;

	/* Initialize queues info within the HFI memory */
	/*
	 * Calculate the offset where the hfi queue header starts, which is at
	 * the end of the hfi table header
	 */
	HWFNC_DBG_INIT("Initialize queues\n");
	hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
		((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
	for (i = 0; i < queues_num; i++) {
		HWFNC_DBG_INIT("init queue[%d]\n", i);

		/* Calculate the offset where the queue starts */
		start_queue_offset = headers_size + (i * queue_size); /* bytes */
		qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
		qptr = (char *)ptr + start_queue_offset; /* start of the VA for queue elems */

		/* Set the physical start address in the HFI queue header */
		hfi_queue_header->start_addr = qphys;

		/* Set the queue type (i.e. RX or TX queue) */
		hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;

		/* Set the size of this queue */
		hfi_queue_header->queue_size = queue_size;

		/* Set the payload size */
		hfi_queue_header->pkt_size = payload_size;

		/* Store the memory info in the client data */
		queues[i].va_queue = qptr;
		queues[i].pa_queue = qphys;
		queues[i].va_header = hfi_queue_header;
		queues[i].q_size_bytes = queue_size;
		HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=%pa hd:0x%pK sz:%u pkt:%d\n",
			hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
			client_id, i, queues[i].va_queue, &queues[i].pa_queue, queues[i].va_header,
			queues[i].q_size_bytes, payload_size);

		/* Next header */
		hfi_queue_header++;
	}

	return ret;
}
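
/*
 * Resulting layout of each reserved region (derived from the loop above): the
 * HFI table header, followed by one HFI queue header per queue, padded out to
 * headers_size, followed by the queue ring buffers themselves:
 *
 *	[table header][q0 header][q1 header]...[pad][q0 ring][q1 ring]...
 *
 * Queue 0 is always the TX queue and subsequent queues are RX queues.
 */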

static inline bool _lock_client_queue(int queue_type)
{
	/* Only lock the Rx Queue */
	return queue_type == (HW_FENCE_RX_QUEUE - 1);
}

char *_get_queue_type(int queue_type)
{
	return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ";
}

int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_queue_payload *payload, int queue_type)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 read_idx;
	u32 write_idx;
	u32 to_read_idx;
	u32 *read_ptr;
	u32 payload_size_u32;
	u32 q_size_u32;
	struct msm_hw_fence_queue_payload *read_ptr_payload;

	if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) {
		HWFNC_ERR("Invalid queue type:%d hw_fence_client:0x%pK payload:0x%pK\n", queue_type,
			hw_fence_client, payload);
		return -EINVAL;
	}

	queue = &hw_fence_client->queues[queue_type];
	hfi_header = queue->va_header;
	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
	payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
	HWFNC_DBG_Q("sizeof payload:%zu\n", sizeof(struct msm_hw_fence_queue_payload));

	if (!hfi_header) {
		HWFNC_ERR("Invalid queue\n");
		return -EINVAL;
	}

	/* Make sure data is ready before read */
	mb();

	/* Get read and write index */
	read_idx = readl_relaxed(&hfi_header->read_index);
	write_idx = readl_relaxed(&hfi_header->write_index);
	HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
		hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
		read_idx, write_idx, queue);

	if (read_idx == write_idx) {
		HWFNC_DBG_Q("Nothing to read!\n");
		return 0;
	}

	/* Move the pointer to where we need to read and cast it */
	read_ptr = ((u32 *)queue->va_queue + read_idx);
	read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr;
	HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=0x%pK read_ptr_payload:0x%pK\n", read_ptr,
		queue->va_queue, queue->pa_queue, read_ptr_payload);

	/* Calculate the index after the read */
	to_read_idx = read_idx + payload_size_u32;

	/*
	 * wrap-around case: here we are reading the last element of the queue,
	 * therefore set to_read_idx, which is the index after the read, to the
	 * beginning of the queue
	 */
	if (to_read_idx >= q_size_u32)
		to_read_idx = 0;

	/* Read the client queue */
	payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id);
	payload->seqno = readq_relaxed(&read_ptr_payload->seqno);
	payload->hash = readq_relaxed(&read_ptr_payload->hash);
	payload->flags = readq_relaxed(&read_ptr_payload->flags);
	payload->error = readl_relaxed(&read_ptr_payload->error);

	/* update the read index */
	writel_relaxed(to_read_idx, &hfi_header->read_index);

	/* update memory for the index */
	wmb();

	/* Return one if the queue still has contents after the read */
	return to_read_idx == write_idx ? 0 : 1;
}
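
/*
 * Return contract of hw_fence_read_queue(), summarized from the code above:
 *   < 0  invalid client or queue;
 *     0  either nothing was read (read_idx == write_idx on entry) or the
 *        payload just read was the last one in the queue;
 *   > 0  a payload was read and more entries remain.
 * Callers that need to distinguish the two 0 cases must track it themselves,
 * e.g. by remembering whether a previous iteration returned 1.
 */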

/*
 * This function writes to the queue of the client. The 'queue_type' determines
 * if this function is writing to the rx or tx queue
 */
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
	u64 flags, u32 error, int queue_type)
{
	struct msm_hw_fence_hfi_queue_header *hfi_header;
	struct msm_hw_fence_queue *queue;
	u32 read_idx;
	u32 write_idx;
	u32 to_write_idx;
	u32 q_size_u32;
	u32 q_free_u32;
	u32 *q_payload_write_ptr;
	u32 payload_size, payload_size_u32;
	struct msm_hw_fence_queue_payload *write_ptr_payload;
	bool lock_client = false;
	u32 lock_idx;
	u64 timestamp;
	int ret = 0;

	if (queue_type >= HW_FENCE_CLIENT_QUEUES) {
		HWFNC_ERR("Invalid queue type:%d\n", queue_type);
		return -EINVAL;
	}

	queue = &hw_fence_client->queues[queue_type];
	hfi_header = queue->va_header;
	q_size_u32 = (queue->q_size_bytes / sizeof(u32));
	payload_size = sizeof(struct msm_hw_fence_queue_payload);
	payload_size_u32 = (payload_size / sizeof(u32));

	if (!hfi_header) {
		HWFNC_ERR("Invalid queue\n");
		return -EINVAL;
	}

	/*
	 * We need to lock the client if there is an Rx Queue update, since that
	 * is the only time when the HW Fence driver can have a race condition
	 * updating the Rx Queue, which could also be getting updated by the
	 * Fence Controller
	 */
	lock_client = _lock_client_queue(queue_type);
	if (lock_client) {
		lock_idx = hw_fence_client->client_id - 1;
		if (lock_idx >= drv_data->client_lock_tbl_cnt) {
			HWFNC_ERR("lock for client id:%d exceeds max:%d\n",
				hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
			return -EINVAL;
		}
		HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);

		/* lock the client rx queue to update */
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */
	}

	/* Make sure data is ready before read */
	mb();

	/* Get read and write index */
	read_idx = readl_relaxed(&hfi_header->read_index);
	write_idx = readl_relaxed(&hfi_header->write_index);
	HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n",
		hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
		read_idx, write_idx, queue, queue_type);

	/* Check the queue to make sure the message will fit */
	q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
		(read_idx - write_idx);
	if (q_free_u32 <= payload_size_u32) {
		HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32);
		ret = -EINVAL;
		goto exit;
	}
	HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32);

	/* Move the pointer to where we need to write and cast it */
	q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx);
	write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr;
	HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=0x%pK write_ptr_payload:0x%pK\n",
		q_payload_write_ptr, queue->va_queue, queue->pa_queue, write_ptr_payload);

	/* calculate the index after the write */
	to_write_idx = write_idx + payload_size_u32;
	HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx,
		payload_size_u32);
	HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n",
		hw_fence_client->client_id, _get_queue_type(queue_type),
		hash, ctxt_id, seqno, flags, error);

	/*
	 * wrap-around case: here we are writing to the last element of the queue,
	 * therefore set to_write_idx, which is the index after the write, to the
	 * beginning of the queue
	 */
	if (to_write_idx >= q_size_u32)
		to_write_idx = 0;

	/* Update the client queue */
	writeq_relaxed(payload_size, &write_ptr_payload->size);
	writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
	writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
	writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
	writeq_relaxed(seqno, &write_ptr_payload->seqno);
	writeq_relaxed(hash, &write_ptr_payload->hash);
	writeq_relaxed(flags, &write_ptr_payload->flags);
	writel_relaxed(error, &write_ptr_payload->error);
	timestamp = hw_fence_get_qtime(drv_data);
	writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
	writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);

	/* update memory for the message */
	wmb();

	/* update the write index */
	writel_relaxed(to_write_idx, &hfi_header->write_index);

	/* update memory for the index */
	wmb();

exit:
	if (lock_client)
		GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */

	return ret;
}
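
/*
 * Note on index units (a summary of the arithmetic above): read_index and
 * write_index count u32 words, not bytes or packets, so a queue of
 * q_size_bytes holds q_size_bytes / 4 index positions and each payload
 * advances the index by sizeof(struct msm_hw_fence_queue_payload) / 4.
 * The free-space check rejects a write unless free > payload_size_u32,
 * keeping at least one word of slack so that read_idx == write_idx always
 * means "empty", never "full".
 */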

static int init_global_locks(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	phys_addr_t phys;
	void *ptr;
	u32 size;
	int ret;

	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr,
		&size, 0);
	if (ret) {
		HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor = &drv_data->clients_locks_mem_desc;
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size;
	mem_descriptor->mem_data = NULL; /* not storing special info for now */

	/* Initialize the internal pointers for managing the tables */
	drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr;
	drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64);

	return 0;
}

static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	phys_addr_t phys;
	void *ptr;
	u32 size;
	int ret;

	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr,
		&size, 0);
	if (ret) {
		HWFNC_ERR("Failed to reserve table mem %d\n", ret);
		return -ENOMEM;
	}
	HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%d\n", &phys, ptr, size);

	/* Populate the memory descriptor with the addresses */
	mem_descriptor = &drv_data->hw_fences_mem_desc;
	mem_descriptor->virtual_addr = ptr;
	mem_descriptor->device_addr = phys;
	mem_descriptor->size = size;
	mem_descriptor->mem_data = NULL; /* not storing special info for now */

	/* Initialize the internal pointers for managing the tables */
	drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr;
	drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size /
		sizeof(struct msm_hw_fence);

	HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl,
		drv_data->hw_fences_tbl_cnt);

	return 0;
}

static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
{
	struct msm_hw_fence_mem_addr *mem_descriptor;
	int ret;

	mem_descriptor = &drv_data->ctrl_queue_mem_desc;

	/* Init ctrl queue */
	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
		mem_descriptor, drv_data->ctrl_queues,
		HW_FENCE_CTRL_QUEUES, 0);
	if (ret)
		HWFNC_ERR("Failure to init ctrl queue\n");

	return ret;
}

int hw_fence_init(struct hw_fence_driver_data *drv_data)
{
	int ret;
	__le32 *mem;

	ret = hw_fence_utils_parse_dt_props(drv_data);
	if (ret) {
		HWFNC_ERR("failed to set dt properties\n");
		goto exit;
	}

	/* Allocate the hw fence driver mem pool and share it with HYP */
	ret = hw_fence_utils_alloc_mem(drv_data);
	if (ret) {
		HWFNC_ERR("failed to alloc base memory\n");
		goto exit;
	}

	/* Initialize ctrl queue */
	ret = init_ctrl_queue(drv_data);
	if (ret)
		goto exit;

	ret = init_global_locks(drv_data);
	if (ret)
		goto exit;
	HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl,
		drv_data->client_lock_tbl_cnt);

	/* Initialize hw fences table */
	ret = init_hw_fences_table(drv_data);
	if (ret)
		goto exit;

	/* Map ipcc registers */
	ret = hw_fence_utils_map_ipcc(drv_data);
	if (ret) {
		HWFNC_ERR("ipcc regs mapping failed\n");
		goto exit;
	}

	/* Map time register */
	ret = hw_fence_utils_map_qtime(drv_data);
	if (ret) {
		HWFNC_ERR("qtime reg mapping failed\n");
		goto exit;
	}

	/* Map ctl_start registers */
	ret = hw_fence_utils_map_ctl_start(drv_data);
	if (ret) {
		/* This is not a fatal error, since platforms with dpu-ipc
		 * won't use this option
		 */
		HWFNC_WARN("no ctl_start regs, won't trigger the frame\n");
	}

	/* Init debugfs */
	ret = hw_fence_debug_debugfs_register(drv_data);
	if (ret) {
		HWFNC_ERR("debugfs init failed\n");
		goto exit;
	}

	/* Init vIRQ from VM */
	ret = hw_fence_utils_init_virq(drv_data);
	if (ret) {
		HWFNC_ERR("failed to init virq\n");
		goto exit;
	}

	mem = drv_data->io_mem_base;
	HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem);

	HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n",
		drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt);

exit:
	return ret;
}

int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct msm_hw_fence_mem_addr *mem_descriptor)
{
	int ret;

	/* Init client queues */
	ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
		&hw_fence_client->mem_descriptor, hw_fence_client->queues,
		HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id);
	if (ret) {
		HWFNC_ERR("Failure to init the queue for client:%d\n",
			hw_fence_client->client_id);
		goto exit;
	}

	/* Init client memory descriptor */
	memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
		sizeof(struct msm_hw_fence_mem_addr));

exit:
	return ret;
}
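
/*
 * Minimal sketch of the client registration path as seen from this file (the
 * actual entry point lives elsewhere in the driver): once a client object
 * exists, its queues and memory descriptor are set up here and the controller
 * signaling is initialized below:
 *
 *	struct msm_hw_fence_mem_addr mem_desc;
 *
 *	ret = hw_fence_alloc_client_resources(drv_data, client, &mem_desc);
 *	if (!ret)
 *		ret = hw_fence_init_controller_signal(drv_data, client);
 */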

int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	int ret = 0;

	/*
	 * Initialize IPCC Signals for this client
	 *
	 * NOTE: For each client HW-Core, the client drivers might be the ones
	 * doing their own initialization (in case any hw-sequence must be
	 * enforced); however, if that is not the case, any per-client ipcc init
	 * to enable the signaling can go here.
	 */
	switch (hw_fence_client->client_id) {
	case HW_FENCE_CLIENT_ID_CTX0:
		/* nothing to initialize for gpu client */
		break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	case HW_FENCE_CLIENT_ID_VAL0:
	case HW_FENCE_CLIENT_ID_VAL1:
	case HW_FENCE_CLIENT_ID_VAL2:
	case HW_FENCE_CLIENT_ID_VAL3:
	case HW_FENCE_CLIENT_ID_VAL4:
	case HW_FENCE_CLIENT_ID_VAL5:
	case HW_FENCE_CLIENT_ID_VAL6:
		/* nothing to initialize for validation clients */
		break;
#endif /* CONFIG_DEBUG_FS */
	case HW_FENCE_CLIENT_ID_CTL0:
	case HW_FENCE_CLIENT_ID_CTL1:
	case HW_FENCE_CLIENT_ID_CTL2:
	case HW_FENCE_CLIENT_ID_CTL3:
	case HW_FENCE_CLIENT_ID_CTL4:
	case HW_FENCE_CLIENT_ID_CTL5:
#ifdef HW_DPU_IPCC
		/* initialize ipcc signals for dpu clients */
		HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n",
			hw_fence_client->client_id, drv_data->ipcc_dpu_initialized);
		if (!drv_data->ipcc_dpu_initialized) {
			drv_data->ipcc_dpu_initialized = true;

			/* Init dpu client ipcc signal */
			hw_fence_ipcc_enable_dpu_signaling(drv_data);
		}
#endif /* HW_DPU_IPCC */
		break;
	default:
		HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id);
		ret = -EINVAL;
		break;
	}

	return ret;
}

int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
{
	/*
	 * Initialize the Fence Controller resources for this client.
	 * Here we would need to use the CTRL queue to communicate to the Fence
	 * Controller the shared memory for this client's Rx/Tx queues, as well
	 * as any other information the Fence Controller might need to know
	 * about this client.
	 *
	 * NOTE: For now, we are doing a static allocation of the client's
	 * queues, so currently we don't need any notification to the Fence
	 * Controller here through the CTRL queue. Later on we might need it,
	 * once the PVM to SVM (and vice versa) communication for initialization
	 * is supported.
	 */

	return 0;
}

void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client)
{
	/*
	 * Deallocate any resource allocated for this client.
	 * If the fence controller was notified about the existence of this
	 * client, we will need to notify the fence controller that this client
	 * is gone.
	 *
	 * NOTE: Since we currently use 'fixed' memory for the client queues,
	 * we don't need any notification to the Fence Controller yet; however,
	 * if the memory allocation moves from 'fixed' to dynamic, then we will
	 * need to notify the Fence Controller here about the client going away.
	 */
	mutex_lock(&drv_data->clients_register_lock);
	drv_data->clients[hw_fence_client->client_id] = NULL;
	mutex_unlock(&drv_data->clients_register_lock);

	/* Deallocate the client's object */
	HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
	kfree(hw_fence_client);
}

static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
	u64 step, u64 *hash)
{
	u64 m_size = table_total_entries;
	int val = 0;

	if (step == 0) {
		u64 a_multiplier = HW_FENCE_HASH_A_MULT;
		u64 c_multiplier = HW_FENCE_HASH_C_MULT;
		u64 b_multiplier = context + (context - 1); /* odd multiplier */

		/*
		 * if m is a power of 2, we could optimize with a right shift;
		 * for now we don't do it, to avoid assuming a power of two
		 */
		*hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
	} else {
		if (step >= m_size) {
			/*
			 * If we already traversed the whole table, return failure, since this
			 * means there are no available spots; the table is either full or full
			 * enough that we couldn't find an available spot after traversing the
			 * whole table. Ideally the table shouldn't be so full that we cannot
			 * find a value after some iterations, so this maximum step size could
			 * be optimized to fail earlier.
			 */
			HWFNC_ERR("Fence Table traversed and no available space!\n");
			val = -EINVAL;
		} else {
			/*
			 * Linearly increment the hash value to find the next element in the
			 * table. Note that this relies on the 'scrambled' data from the
			 * original hash. Also, add a mod division to wrap around in case we
			 * reached the end of the table.
			 */
			*hash = (*hash + 1) % m_size;
		}
	}

	return val;
}
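
/*
 * In short, the probe sequence generated above is (a sketch of the math, with
 * A = HW_FENCE_HASH_A_MULT, C = HW_FENCE_HASH_C_MULT and m = table size):
 *
 *	hash(0) = (A * seqno * (2 * context - 1) + C * context) mod m
 *	hash(n) = (hash(n - 1) + 1) mod m,  for 0 < n < m
 *
 * i.e. a multiplicative hash followed by linear probing, failing after m
 * steps.
 */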

static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
	struct msm_hw_fence *hw_fences_tbl,
	u64 hash)
{
	if (hash >= table_total_entries) {
		HWFNC_ERR("hash:%llu out of max range:%u\n",
			hash, table_total_entries);
		return NULL;
	}

	return &hw_fences_tbl[hash];
}

static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
	/* If valid is set, the hw fence is not free */
	return !hw_fence->valid;
}

static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
	return hw_fence->ctx_id == context && hw_fence->seq_id == seqno;
}

/* clears everything but the 'valid' field */
static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence)
{
	int i;

	hw_fence->error = 0;
	wmb(); /* update memory to avoid mem-abort */
	hw_fence->ctx_id = 0;
	hw_fence->seq_id = 0;
	hw_fence->wait_client_mask = 0;
	hw_fence->fence_allocator = 0;
	hw_fence->fence_signal_client = 0;
	hw_fence->flags = 0;
	hw_fence->fence_create_time = 0;
	hw_fence->fence_trigger_time = 0;
	hw_fence->fence_wait_time = 0;
	hw_fence->debug_refcount = 0;
	hw_fence->parents_cnt = 0;
	hw_fence->pending_child_cnt = 0;

	for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++)
		hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE;
}

/* This function must be called with the hw fence lock */
static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* reserve this HW fence */
	hw_fence->valid = 1;
	hw_fence->ctx_id = context;
	hw_fence->seq_id = seqno;
	hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */
	hw_fence->fence_allocator = client_id;
	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;

	HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* unreserve this HW fence */
	hw_fence->valid = 0;

	HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _reserve_join_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id, u64 context,
	u64 seqno, u32 hash, u32 pending_child_cnt)
{
	_cleanup_hw_fence(hw_fence);

	/* reserve this HW fence */
	hw_fence->valid = true;
	hw_fence->ctx_id = context;
	hw_fence->seq_id = seqno;
	hw_fence->fence_allocator = client_id;
	hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;
	hw_fence->pending_child_cnt = pending_child_cnt;

	HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _fence_found(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u32 client_id,
	u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
	/*
	 * Do nothing: when this find-fence fn is invoked, all processing is done
	 * outside. Currently just keeping this function for debugging purposes;
	 * it can be removed in final versions
	 */
	HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n",
		client_id, context, seqno, hash);
}

char *_get_op_mode(enum hw_fence_lookup_ops op_code)
{
	switch (op_code) {
	case HW_FENCE_LOOKUP_OP_CREATE:
		return "CREATE";
	case HW_FENCE_LOOKUP_OP_DESTROY:
		return "DESTROY";
	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
		return "CREATE_JOIN";
	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
		return "FIND_FENCE";
	default:
		return "UNKNOWN";
	}
}

struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id,
	u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash)
{
	bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno);
	void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence,
		u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending);
	struct msm_hw_fence *hw_fence = NULL;
	u64 step = 0;
	int ret = 0;
	bool hw_fence_found = false;

	if (!hash || !drv_data || !hw_fences_tbl) {
		HWFNC_ERR("Invalid input for hw_fence_lookup\n");
		return NULL;
	}

	*hash = ~0;
	HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code);

	switch (op_code) {
	case HW_FENCE_LOOKUP_OP_CREATE:
		compare_fnc = &_is_hw_fence_free;
		process_fnc = &_reserve_hw_fence;
		break;
	case HW_FENCE_LOOKUP_OP_DESTROY:
		compare_fnc = &_hw_fence_match;
		process_fnc = &_unreserve_hw_fence;
		break;
	case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
		compare_fnc = &_is_hw_fence_free;
		process_fnc = &_reserve_join_fence;
		break;
	case HW_FENCE_LOOKUP_OP_FIND_FENCE:
		compare_fnc = &_hw_fence_match;
		process_fnc = &_fence_found;
		break;
	default:
		HWFNC_ERR("Unknown op code:%d\n", op_code);
		return NULL;
	}

	while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) {
		/* Calculate the hash for the fence */
		ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash);
		if (ret) {
			HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n",
				context, seqno, *hash);
			break;
		}
		HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context,
			seqno);

		/* Get the element from the table using the hash */
		hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash);
		HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n",
			hw_fences_tbl, hw_fence, *hash, hw_fence ? hw_fence->valid : 0xbad);
		if (!hw_fence) {
			HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n",
				context, seqno, *hash);
			break;
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1);

		/* compare to either find a free fence or find an allocated fence */
		if (compare_fnc(hw_fence, context, seqno)) {
			/* Process the hw fence found by the algorithm */
			if (process_fnc) {
				process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash,
					pending_child_cnt);

				/* update memory table with processing */
				wmb();
			}

			HWFNC_DBG_L("client_id:%u op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n",
				client_id, _get_op_mode(op_code), context, seqno, *hash, step);

			hw_fence_found = true;
		} else {
			if ((op_code == HW_FENCE_LOOKUP_OP_CREATE ||
					op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) &&
					seqno == hw_fence->seq_id && context == hw_fence->ctx_id) {
				/* ctx & seqno must be unique when creating a hw-fence */
				HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n",
					context, seqno);
				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
				break;
			}

			/* compare can fail if we have a collision; we will linearly resolve it */
			HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash,
				context, seqno);
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);

		/* Increment step for the next loop */
		step++;
	}

	/* If we iterated through the whole list and didn't find the fence, return null */
	if (!hw_fence_found) {
		HWFNC_ERR("fail to create hw-fence step:%llu\n", step);
		hw_fence = NULL;
	}

	HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n",
		op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1);

	return hw_fence;
}
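
/*
 * Summary of the lookup protocol implemented above: for each probe step,
 * compute the slot, take the per-fence inter-VM lock, run the op-specific
 * compare (free-slot check for creates, ctx/seqno match for destroy/find),
 * run the op-specific process callback while still holding the lock, then
 * release it and move to the next probe on a miss. Creates additionally fail
 * fast if an occupied slot already holds the same ctx/seqno pair.
 */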

int hw_fence_create(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash)
{
	u32 client_id = hw_fence_client->client_id;
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	int ret = 0;

	/* allocate hw fence in table */
	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
			context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) {
		HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);
		ret = -EINVAL;
	}

	return ret;
}

static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno)
{
	u64 hash;

	if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
			context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash))
		return -EINVAL;

	return 0;
}

int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno)
{
	u32 client_id = hw_fence_client->client_id;
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	int ret = 0;

	/* remove hw fence from table */
	if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) {
		HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);
		ret = -EINVAL;
	}

	return ret;
}

static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence_array *array, u64 *hash, bool create)
{
	struct msm_hw_fence *hw_fences_tbl;
	struct msm_hw_fence *join_fence = NULL;
	u64 context, seqno;
	u32 client_id, pending_child_cnt;

	/*
	 * NOTE: For now we are allocating the join fences from the same table as
	 * all the other fences (i.e. drv_data->hw_fences_tbl); functionally this
	 * will work, however, it might impact the lookup algorithm, since the
	 * "join-fences" are created with the context and seqno of a fence-array,
	 * and those might not be changed by the client, so this will linearly
	 * increment the look-up and very likely impact the other fences if these
	 * join-fences start to fill up a particular region of the fences global
	 * table. So we might have to allocate a different table altogether for
	 * these join fences. However, to do this, just alloc another table and
	 * change it here:
	 */
	hw_fences_tbl = drv_data->hw_fences_tbl;

	context = array->base.context;
	seqno = array->base.seqno;
	pending_child_cnt = array->num_fences;
	client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID;

	if (create) {
		/* allocate the fence */
		join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
			seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash);
		if (!join_fence)
			HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n",
				client_id, context, seqno);
	} else {
		/* destroy the fence */
		if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno))
			HWFNC_ERR("Fail destroying join fence client:%u ctx:%llu seqno:%llu\n",
				client_id, context, seqno);
	}

	return join_fence;
}

struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	u64 context, u64 seqno, u64 *hash)
{
	struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
	struct msm_hw_fence *hw_fence;
	u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff;

	/* find the hw fence */
	hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
		seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash);
	if (!hw_fence)
		HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n",
			client_id, context, seqno);

	return hw_fence;
}

static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
	u64 flags, u32 error)
{
	u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */
	u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */

	HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash);

	/* Write to the Rx queue */
	if (hw_fence_client->update_rxq)
		hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id,
			hw_fence->seq_id, hash, flags, error, HW_FENCE_RX_QUEUE - 1);

	/* Signal the hw fence now */
	if (hw_fence_client->send_ipc)
		hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id,
			hw_fence_client->ipc_signal_id);
}

static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array,
	struct msm_hw_fence *join_fence, u64 hash_join_fence)
{
	struct dma_fence *child_fence;
	struct msm_hw_fence *hw_fence_child;
	int idx, j;
	u64 hash = 0;

	if (!array->fences)
		goto destroy_fence;

	/* cleanup the child-fences from the parent join-fence */
	for (idx = iteration; idx >= 0; idx--) {
		child_fence = array->fences[idx];
		if (!child_fence) {
			HWFNC_ERR("invalid child fence idx:%d\n", idx);
			continue;
		}

		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
			child_fence->seqno, &hash);
		if (!hw_fence_child) {
			HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n",
				child_fence->context, child_fence->seqno, hash);

			/*
			 * ideally this should not have happened, but if it did, try to keep
			 * cleaning up other fences after printing the error
			 */
			continue;
		}

		/* lock the child while we clean it up from the parent join-fence */
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
		for (j = hw_fence_child->parents_cnt; j > 0; j--) {
			if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
				HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n",
					hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
				j = MSM_HW_FENCE_MAX_JOIN_PARENTS;
			}

			if (hw_fence_child->parent_list[j - 1] == hash_join_fence) {
				hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE;

				if (hw_fence_child->parents_cnt)
					hw_fence_child->parents_cnt--;

				/* update memory for the table update */
				wmb();
			}
		}
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
	}

destroy_fence:
	/* destroy join fence */
	_hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
		false);
}

int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array)
{
	struct msm_hw_fence *join_fence;
	struct msm_hw_fence *hw_fence_child;
	struct dma_fence *child_fence;
	bool signal_join_fence = false;
	u64 hash_join_fence, hash;
	int i, ret = 0;

	/*
	 * Create the join fence from the join-fences table.
	 * This function initializes:
	 * join_fence->pending_child_count = array->num_fences
	 */
	join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array,
		&hash_join_fence, true);
	if (!join_fence) {
		HWFNC_ERR("cannot alloc hw fence for join fence array\n");
		return -EINVAL;
	}

	/* update this as waiting client of the join-fence */
	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
	join_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
	GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */

	/* Iterate through the fences of the array */
	for (i = 0; i < array->num_fences; i++) {
		child_fence = array->fences[i];

		/* Nested fence-arrays are not supported */
		if (to_dma_fence_array(child_fence)) {
			HWFNC_ERR("This is a nested fence, fail!\n");
			ret = -EINVAL;
			goto error_array;
		}

		/* All elements in the fence-array must be hw-fences */
		if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) {
			HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n");
			ret = -EINVAL;
			goto error_array;
		}

		/* Find the HW Fence in the global table */
		hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
			child_fence->seqno, &hash);
		if (!hw_fence_child) {
			HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n",
				child_fence->context, child_fence->seqno, hash);
			ret = -EINVAL;
			goto error_array;
		}

		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
		if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
			/* child fence is already signaled */
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
			if (--join_fence->pending_child_cnt == 0)
				signal_join_fence = true;

			/* update memory for the table update */
			wmb();
			GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
		} else {
			/* child fence is not signaled */
			hw_fence_child->parents_cnt++;
			if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS
					|| hw_fence_child->parents_cnt < 1) {
				/* Max number of parents for a fence is exceeded */
				HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n",
					hw_fence_child->parents_cnt);
				hw_fence_child->parents_cnt--;

				/* update memory for the table update */
				wmb();

				/* unlock */
				GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0);
				ret = -EINVAL;
				goto error_array;
			}

			hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
				hash_join_fence;

			/* update memory for the table update */
			wmb();
		}
		GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
	}

	/* all fences were signaled, signal the client now */
	if (signal_join_fence) {
		/* signal the join hw fence */
		_fence_ctl_signal(drv_data, hw_fence_client, join_fence, hash_join_fence, 0, 0);
		set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);

		/*
		 * the job of the join-fence is finished since we already signaled,
		 * so we can delete it now. This can happen when all the fences that
		 * are part of the join-fence are already signaled.
		 */
		_hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
			false);
	} else if (!array->num_fences) {
		/*
		 * if we didn't signal the join-fence and the number of fences is not
		 * set in the fence-array, then fail here; otherwise the driver would
		 * create a join-fence with no children that won't be signaled at all,
		 * or an incomplete join-fence
		 */
		HWFNC_ERR("invalid fence-array ctx:%llu seqno:%llu without fences\n",
			array->base.context, array->base.seqno);
		goto error_array;
	}

	return ret;

error_array:
	_cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
		hash_join_fence);

	return -EINVAL;
}
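
/*
 * Lifecycle of a join-fence, as implemented above: it is created with
 * pending_child_cnt = num_fences; children that were already signaled
 * decrement the count immediately, and unsignaled children record the
 * join-fence hash in their parent_list so the count can be decremented when
 * they signal. The join-fence is destroyed either on error, or right away if
 * every child was already signaled at creation time.
 */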

int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
	struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
	u64 seqno)
{
	struct msm_hw_fence *hw_fence;
	u64 hash;

	/* find the hw fence within the table */
	hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, &hash);
	if (!hw_fence) {
		HWFNC_ERR("Cannot find fence!\n");
		return -EINVAL;
	}

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */

	/* register the client in the hw fence */
	hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
	hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data);
	hw_fence->debug_refcount++;

	/* update memory for the table update */
	wmb();

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	/* if the hw fence is already signaled, signal the client */
	if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
		if (fence != NULL)
			set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
		_fence_ctl_signal(drv_data, hw_fence_client, hw_fence, hash, 0, 0);
	}

	return 0;
}
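
/*
 * Note on the order of operations above: the wait bit is set (under the
 * inter-VM lock) before MSM_HW_FENCE_FLAG_SIGNAL is checked, so a fence that
 * signals concurrently with registration is caught either by the signaler
 * seeing the wait bit or by the check after the unlock; the client cannot
 * miss the signal entirely.
 */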

int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client,
	struct dma_fence *fence)
{
	int ret = 0;

	if (!drv_data || !hw_fence_client || !fence) {
		HWFNC_ERR("Invalid Input!\n");
		return -EINVAL;
	}

	/* fence must be a hw-fence */
	if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
		HWFNC_ERR("DMA Fence is not a HW Fence, flags:0x%lx\n", fence->flags);
		return -EINVAL;
	}

	ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
		fence->seqno);
	if (ret)
		HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);

	return ret;
}

static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence *hw_fence, u64 hash, int error)
{
	enum hw_fence_client_id wait_client_id;
	struct msm_hw_fence_client *hw_fence_wait_client;

	/* signal all the waiting clients for this fence, passing along the error */
	for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) {
		if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
			hw_fence_wait_client = drv_data->clients[wait_client_id];

			if (hw_fence_wait_client)
				_fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
					hash, 0, error);
		}
	}
}

int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
	struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
	u32 reset_flags)
{
	int ret = 0;
	int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;

	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
	if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) {
		HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n",
			hw_fence_client->client_id, hw_fence->ctx_id,
			hw_fence->seq_id);
		hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id);

		/* update memory for the table update */
		wmb();
	}
	GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

	if (hw_fence->fence_allocator == hw_fence_client->client_id) {
		/* if the fence is not signaled, signal all the waiting clients with an error */
		if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
			_signal_all_wait_clients(drv_data, hw_fence, hash, error);

		if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
			goto skip_destroy;

		ret = hw_fence_destroy(drv_data, hw_fence_client,
			hw_fence->ctx_id, hw_fence->seq_id);
		if (ret) {
			HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n",
				hw_fence->ctx_id, hw_fence->seq_id);
		}
	}

skip_destroy:
	return ret;
}