hw_fence_drv_priv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/uaccess.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"

/* Global atomic lock */
#define GLOBAL_ATOMIC_STORE(drv_data, lock, val) global_atomic_store(drv_data, lock, val)
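
/*
 * Note on usage (sketch derived from the call sites below, not a spec of
 * global_atomic_store() itself): GLOBAL_ATOMIC_STORE is used throughout this
 * file as a minimal lock/unlock primitive over memory shared with the Fence
 * Controller. Storing 1 acquires the per-fence (or per-client) lock and
 * storing 0 releases it:
 *
 *   GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); // lock
 *   ...critical section updating the shared table...
 *   GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); // unlock
 *
 * The actual atomicity guarantees live in global_atomic_store().
 */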

inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data)
{
#ifdef HWFENCE_USE_SLEEP_TIMER
    return readl_relaxed(drv_data->qtime_io_mem);
#else /* USE QTIMER */
    return arch_timer_read_counter();
#endif /* HWFENCE_USE_SLEEP_TIMER */
}

static int init_hw_fences_queues(struct hw_fence_driver_data *drv_data,
    enum hw_fence_mem_reserve mem_reserve_id,
    struct msm_hw_fence_mem_addr *mem_descriptor,
    struct msm_hw_fence_queue *queues, int queues_num,
    int client_id)
{
    struct msm_hw_fence_hfi_queue_table_header *hfi_table_header;
    struct msm_hw_fence_hfi_queue_header *hfi_queue_header;
    void *ptr, *qptr;
    phys_addr_t phys, qphys;
    u32 size, start_queue_offset;
    int headers_size, queue_size, payload_size;
    int i, ret = 0;

    HWFNC_DBG_INIT("mem_reserve_id:%d client_id:%d\n", mem_reserve_id, client_id);
    switch (mem_reserve_id) {
    case HW_FENCE_MEM_RESERVE_CTRL_QUEUE:
        headers_size = HW_FENCE_HFI_CTRL_HEADERS_SIZE;
        queue_size = drv_data->hw_fence_ctrl_queue_size;
        payload_size = HW_FENCE_CTRL_QUEUE_PAYLOAD;
        break;
    case HW_FENCE_MEM_RESERVE_CLIENT_QUEUE:
        headers_size = HW_FENCE_HFI_CLIENT_HEADERS_SIZE;
        queue_size = drv_data->hw_fence_client_queue_size;
        payload_size = HW_FENCE_CLIENT_QUEUE_PAYLOAD;
        break;
    default:
        HWFNC_ERR("Unexpected mem reserve id: %d\n", mem_reserve_id);
        return -EINVAL;
    }

    /* Reserve Virtual and Physical memory for HFI headers */
    ret = hw_fence_utils_reserve_mem(drv_data, mem_reserve_id, &phys, &ptr, &size, client_id);
    if (ret) {
        HWFNC_ERR("Failed to reserve id:%d client %d\n", mem_reserve_id, client_id);
        return -ENOMEM;
    }
    HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%u\n", &phys, ptr, size);

    /* Populate Memory descriptor with address */
    mem_descriptor->virtual_addr = ptr;
    mem_descriptor->device_addr = phys;
    mem_descriptor->size = size; /* bytes */
    mem_descriptor->mem_data = NULL; /* Currently we don't need any special info */

    HWFNC_DBG_INIT("Initialize headers\n");
    /* Initialize headers info within hfi memory */
    hfi_table_header = (struct msm_hw_fence_hfi_queue_table_header *)ptr;
    hfi_table_header->version = 0;
    hfi_table_header->size = size; /* bytes */
    /* Offset, from the Base Address, where the first queue header starts */
    hfi_table_header->qhdr0_offset =
        sizeof(struct msm_hw_fence_hfi_queue_table_header);
    hfi_table_header->qhdr_size =
        sizeof(struct msm_hw_fence_hfi_queue_header);
    hfi_table_header->num_q = queues_num; /* number of queues */
    hfi_table_header->num_active_q = queues_num;

    /* Initialize Queues Info within HFI memory */
    /*
     * Calculate the offset where the hfi queue header starts, which is at
     * the end of the hfi table header
     */
    HWFNC_DBG_INIT("Initialize queues\n");
    hfi_queue_header = (struct msm_hw_fence_hfi_queue_header *)
        ((char *)ptr + HW_FENCE_HFI_TABLE_HEADER_SIZE);
    for (i = 0; i < queues_num; i++) {
        HWFNC_DBG_INIT("init queue[%d]\n", i);

        /* Calculate the offset where the Queue starts */
        start_queue_offset = headers_size + (i * queue_size); /* Bytes */
        qphys = phys + start_queue_offset; /* start of the PA for the queue elems */
        qptr = (char *)ptr + start_queue_offset; /* start of the VA for queue elems */

        /* Set the physical start address in the HFI queue header */
        hfi_queue_header->start_addr = qphys;

        /* Set the queue type (i.e. RX or TX queue) */
        hfi_queue_header->type = (i == 0) ? HW_FENCE_TX_QUEUE : HW_FENCE_RX_QUEUE;

        /* Set the size of this queue */
        hfi_queue_header->queue_size = queue_size;

        /* Set the payload size */
        hfi_queue_header->pkt_size = payload_size;

        /* Store Memory info in the Client data */
        queues[i].va_queue = qptr;
        queues[i].pa_queue = qphys;
        queues[i].va_header = hfi_queue_header;
        queues[i].q_size_bytes = queue_size;
        HWFNC_DBG_INIT("init:%s client:%d q[%d] va=0x%pK pa=%pa hd:0x%pK sz:%u pkt:%d\n",
            hfi_queue_header->type == HW_FENCE_TX_QUEUE ? "TX_QUEUE" : "RX_QUEUE",
            client_id, i, queues[i].va_queue, &queues[i].pa_queue, queues[i].va_header,
            queues[i].q_size_bytes, payload_size);

        /* Next header */
        hfi_queue_header++;
    }

    return ret;
}
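
/*
 * Resulting memory layout (sketch, derived from the initialization above,
 * shown here for the two-queue client case):
 *
 *   base (phys / ptr)
 *   +----------------------------------+
 *   | hfi_queue_table_header           |
 *   +----------------------------------+
 *   | hfi_queue_header[0] (TX)         |
 *   | hfi_queue_header[1] (RX)         |
 *   +----------------------------------+ <- headers_size
 *   | queue[0] payload ring (TX)       |
 *   +----------------------------------+ <- headers_size + queue_size
 *   | queue[1] payload ring (RX)       |
 *   +----------------------------------+
 */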

static inline bool _lock_client_queue(int queue_type)
{
    /* Only lock the Rx Queue */
    return queue_type == (HW_FENCE_RX_QUEUE - 1);
}

char *_get_queue_type(int queue_type)
{
    return (queue_type == (HW_FENCE_RX_QUEUE - 1)) ? "RXQ" : "TXQ";
}
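
/*
 * Indexing note (an observation from the usage in this file, not from the
 * headers): queues[] is indexed zero-based (0 = TX, 1 = RX) while the HFI
 * queue-type values appear to be one-based, hence the recurring
 * "HW_FENCE_RX_QUEUE - 1" wherever a queue_type index is compared or
 * passed, e.g.:
 *
 *   queue = &hw_fence_client->queues[HW_FENCE_RX_QUEUE - 1]; // Rx queue
 */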

int hw_fence_read_queue(struct msm_hw_fence_client *hw_fence_client,
    struct msm_hw_fence_queue_payload *payload, int queue_type)
{
    struct msm_hw_fence_hfi_queue_header *hfi_header;
    struct msm_hw_fence_queue *queue;
    u32 read_idx;
    u32 write_idx;
    u32 to_read_idx;
    u32 *read_ptr;
    u32 payload_size_u32;
    u32 q_size_u32;
    struct msm_hw_fence_queue_payload *read_ptr_payload;

    if (queue_type >= HW_FENCE_CLIENT_QUEUES || !hw_fence_client || !payload) {
        HWFNC_ERR("Invalid queue type:%d hw_fence_client:0x%pK payload:0x%pK\n", queue_type,
            hw_fence_client, payload);
        return -EINVAL;
    }

    queue = &hw_fence_client->queues[queue_type];
    hfi_header = queue->va_header;
    q_size_u32 = (queue->q_size_bytes / sizeof(u32));
    payload_size_u32 = (sizeof(struct msm_hw_fence_queue_payload) / sizeof(u32));
    HWFNC_DBG_Q("sizeof payload:%zu\n", sizeof(struct msm_hw_fence_queue_payload));

    if (!hfi_header) {
        HWFNC_ERR("Invalid queue\n");
        return -EINVAL;
    }

    /* Make sure data is ready before read */
    mb();

    /* Get read and write index */
    read_idx = readl_relaxed(&hfi_header->read_index);
    write_idx = readl_relaxed(&hfi_header->write_index);
    HWFNC_DBG_Q("read client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d queue:0x%pK\n",
        hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
        read_idx, write_idx, queue);

    if (read_idx == write_idx) {
        HWFNC_DBG_Q("Nothing to read!\n");
        return 0;
    }

    /* Move the pointer to where we need to read and cast it */
    read_ptr = ((u32 *)queue->va_queue + read_idx);
    read_ptr_payload = (struct msm_hw_fence_queue_payload *)read_ptr;
    HWFNC_DBG_Q("read_ptr:0x%pK queue: va=0x%pK pa=%pa read_ptr_payload:0x%pK\n", read_ptr,
        queue->va_queue, &queue->pa_queue, read_ptr_payload);

    /* Calculate the index after the read */
    to_read_idx = read_idx + payload_size_u32;

    /*
     * wrap-around case: here we are reading the last element of the queue, therefore set
     * to_read_idx, which is the index after the read, to the beginning of the queue
     */
    if (to_read_idx >= q_size_u32)
        to_read_idx = 0;

    /* Read the Client Queue */
    payload->ctxt_id = readq_relaxed(&read_ptr_payload->ctxt_id);
    payload->seqno = readq_relaxed(&read_ptr_payload->seqno);
    payload->hash = readq_relaxed(&read_ptr_payload->hash);
    payload->flags = readq_relaxed(&read_ptr_payload->flags);
    payload->client_data = readq_relaxed(&read_ptr_payload->client_data);
    payload->error = readl_relaxed(&read_ptr_payload->error);

    /* update the read index */
    writel_relaxed(to_read_idx, &hfi_header->read_index);

    /* update memory for the index */
    wmb();

    /* Return one if the queue still has contents after the read */
    return to_read_idx == write_idx ? 0 : 1;
}
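
/*
 * Caller contract (restating the return values above): 1 means entries
 * remain after this read, 0 means the queue is empty afterwards, negative
 * means error. Note 0 is returned both when nothing was read (read_idx ==
 * write_idx on entry) and when the entry just read was the last one, so a
 * caller that must distinguish the two needs to check the indices itself.
 * A minimal drain loop, under that caveat (hypothetical consumer, not part
 * of this file):
 *
 *   struct msm_hw_fence_queue_payload payload;
 *
 *   while (hw_fence_read_queue(client, &payload, HW_FENCE_RX_QUEUE - 1) > 0)
 *       process(&payload); // hypothetical; the final entry needs the
 *                          // index check mentioned above
 */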

/*
 * This function writes to the queue of the client. The 'queue_type' determines
 * if this function is writing to the rx or tx queue
 */
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
    u64 flags, u64 client_data, u32 error, int queue_type)
{
    struct msm_hw_fence_hfi_queue_header *hfi_header;
    struct msm_hw_fence_queue *queue;
    u32 read_idx;
    u32 write_idx;
    u32 to_write_idx;
    u32 q_size_u32;
    u32 q_free_u32;
    u32 *q_payload_write_ptr;
    u32 payload_size, payload_size_u32;
    struct msm_hw_fence_queue_payload *write_ptr_payload;
    bool lock_client = false;
    u32 lock_idx;
    u64 timestamp;
    int ret = 0;

    if (queue_type >= HW_FENCE_CLIENT_QUEUES) {
        HWFNC_ERR("Invalid queue type:%d\n", queue_type);
        return -EINVAL;
    }

    queue = &hw_fence_client->queues[queue_type];
    hfi_header = queue->va_header;
    q_size_u32 = (queue->q_size_bytes / sizeof(u32));
    payload_size = sizeof(struct msm_hw_fence_queue_payload);
    payload_size_u32 = (payload_size / sizeof(u32));

    if (!hfi_header) {
        HWFNC_ERR("Invalid queue\n");
        return -EINVAL;
    }

    /*
     * We need to lock the client if there is an Rx Queue update, since that
     * is the only time when the HW Fence driver can have a race condition
     * updating the Rx Queue, which could also be getting updated by the
     * Fence CTL
     */
    lock_client = _lock_client_queue(queue_type);
    if (lock_client) {
        lock_idx = hw_fence_client->client_id - 1;
        if (lock_idx >= drv_data->client_lock_tbl_cnt) {
            HWFNC_ERR("lock for client id:%d exceeds max:%d\n",
                hw_fence_client->client_id, drv_data->client_lock_tbl_cnt);
            return -EINVAL;
        }
        HWFNC_DBG_Q("Locking client id:%d: idx:%d\n", hw_fence_client->client_id, lock_idx);

        /* lock the client rx queue to update */
        GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 1); /* lock */
    }

    /* Make sure data is ready before read */
    mb();

    /* Get read and write index */
    read_idx = readl_relaxed(&hfi_header->read_index);
    write_idx = readl_relaxed(&hfi_header->write_index);
    HWFNC_DBG_Q("wr client:%d rd_ptr:0x%pK wr_ptr:0x%pK rd_idx:%d wr_idx:%d q:0x%pK type:%d\n",
        hw_fence_client->client_id, &hfi_header->read_index, &hfi_header->write_index,
        read_idx, write_idx, queue, queue_type);

    /* Check queue to make sure message will fit */
    q_free_u32 = read_idx <= write_idx ? (q_size_u32 - (write_idx - read_idx)) :
        (read_idx - write_idx);
    if (q_free_u32 <= payload_size_u32) {
        HWFNC_ERR("cannot fit the message size:%d\n", payload_size_u32);
        ret = -EINVAL;
        goto exit;
    }
    HWFNC_DBG_Q("q_free_u32:%d payload_size_u32:%d\n", q_free_u32, payload_size_u32);

    /* Move the pointer to where we need to write and cast it */
    q_payload_write_ptr = ((u32 *)queue->va_queue + write_idx);
    write_ptr_payload = (struct msm_hw_fence_queue_payload *)q_payload_write_ptr;
    HWFNC_DBG_Q("q_payload_write_ptr:0x%pK queue: va=0x%pK pa=%pa write_ptr_payload:0x%pK\n",
        q_payload_write_ptr, queue->va_queue, &queue->pa_queue, write_ptr_payload);

    /* calculate the index after the write */
    to_write_idx = write_idx + payload_size_u32;
    HWFNC_DBG_Q("to_write_idx:%d write_idx:%d payload_size:%u\n", to_write_idx, write_idx,
        payload_size_u32);
    HWFNC_DBG_L("client_id:%d update %s hash:%llu ctx_id:%llu seqno:%llu flags:%llu error:%u\n",
        hw_fence_client->client_id, _get_queue_type(queue_type),
        hash, ctxt_id, seqno, flags, error);

    /*
     * wrap-around case: here we are writing to the last element of the queue, therefore
     * set to_write_idx, which is the index after the write, to the beginning of the queue
     */
    if (to_write_idx >= q_size_u32)
        to_write_idx = 0;

    /* Update Client Queue */
    writeq_relaxed(payload_size, &write_ptr_payload->size);
    writew_relaxed(HW_FENCE_PAYLOAD_TYPE_1, &write_ptr_payload->type);
    writew_relaxed(HW_FENCE_PAYLOAD_REV(1, 0), &write_ptr_payload->version);
    writeq_relaxed(ctxt_id, &write_ptr_payload->ctxt_id);
    writeq_relaxed(seqno, &write_ptr_payload->seqno);
    writeq_relaxed(hash, &write_ptr_payload->hash);
    writeq_relaxed(flags, &write_ptr_payload->flags);
    writeq_relaxed(client_data, &write_ptr_payload->client_data);
    writel_relaxed(error, &write_ptr_payload->error);
    timestamp = hw_fence_get_qtime(drv_data);
    writel_relaxed(timestamp, &write_ptr_payload->timestamp_lo);
    writel_relaxed(timestamp >> 32, &write_ptr_payload->timestamp_hi);

    /* update memory for the message */
    wmb();

    /* update the write index */
    writel_relaxed(to_write_idx, &hfi_header->write_index);

    /* update memory for the index */
    wmb();

exit:
    if (lock_client)
        GLOBAL_ATOMIC_STORE(drv_data, &drv_data->client_lock_tbl[lock_idx], 0); /* unlock */

    return ret;
}
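
/*
 * Worked example (illustrative numbers): queue indices count u32 words, not
 * bytes. With q_size_u32 = 64, read_idx = 8 and write_idx = 40:
 *
 *   q_free_u32 = 64 - (40 - 8) = 32 words
 *
 * and the write only proceeds while q_free_u32 > payload_size_u32, so the
 * ring never fills completely: one payload of slack is what distinguishes a
 * full queue from an empty one (read_idx == write_idx).
 */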

static int init_global_locks(struct hw_fence_driver_data *drv_data)
{
    struct msm_hw_fence_mem_addr *mem_descriptor;
    phys_addr_t phys;
    void *ptr;
    u32 size;
    int ret;

    ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_LOCKS_REGION, &phys, &ptr,
        &size, 0);
    if (ret) {
        HWFNC_ERR("Failed to reserve clients locks mem %d\n", ret);
        return -ENOMEM;
    }
    HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%u\n", &phys, ptr, size);

    /* Populate Memory descriptor with address */
    mem_descriptor = &drv_data->clients_locks_mem_desc;
    mem_descriptor->virtual_addr = ptr;
    mem_descriptor->device_addr = phys;
    mem_descriptor->size = size;
    mem_descriptor->mem_data = NULL; /* not storing special info for now */

    /* Initialize internal pointers for managing the tables */
    drv_data->client_lock_tbl = (u64 *)drv_data->clients_locks_mem_desc.virtual_addr;
    drv_data->client_lock_tbl_cnt = drv_data->clients_locks_mem_desc.size / sizeof(u64);

    return 0;
}

static int init_hw_fences_table(struct hw_fence_driver_data *drv_data)
{
    struct msm_hw_fence_mem_addr *mem_descriptor;
    phys_addr_t phys;
    void *ptr;
    u32 size;
    int ret;

    ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_TABLE, &phys, &ptr,
        &size, 0);
    if (ret) {
        HWFNC_ERR("Failed to reserve table mem %d\n", ret);
        return -ENOMEM;
    }
    HWFNC_DBG_INIT("phys:%pa ptr:0x%pK size:%u\n", &phys, ptr, size);

    /* Populate Memory descriptor with address */
    mem_descriptor = &drv_data->hw_fences_mem_desc;
    mem_descriptor->virtual_addr = ptr;
    mem_descriptor->device_addr = phys;
    mem_descriptor->size = size;
    mem_descriptor->mem_data = NULL; /* not storing special info for now */

    /* Initialize internal pointers for managing the tables */
    drv_data->hw_fences_tbl = (struct msm_hw_fence *)drv_data->hw_fences_mem_desc.virtual_addr;
    drv_data->hw_fences_tbl_cnt = drv_data->hw_fences_mem_desc.size /
        sizeof(struct msm_hw_fence);

    HWFNC_DBG_INIT("hw_fences_table:0x%pK cnt:%u\n", drv_data->hw_fences_tbl,
        drv_data->hw_fences_tbl_cnt);

    return 0;
}

static int init_ctrl_queue(struct hw_fence_driver_data *drv_data)
{
    struct msm_hw_fence_mem_addr *mem_descriptor;
    int ret;

    mem_descriptor = &drv_data->ctrl_queue_mem_desc;

    /* Init ctrl queue */
    ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
        mem_descriptor, drv_data->ctrl_queues,
        HW_FENCE_CTRL_QUEUES, 0);
    if (ret)
        HWFNC_ERR("Failure to init ctrl queue\n");

    return ret;
}

int hw_fence_init(struct hw_fence_driver_data *drv_data)
{
    int ret;
    __le32 *mem;

    ret = hw_fence_utils_parse_dt_props(drv_data);
    if (ret) {
        HWFNC_ERR("failed to set dt properties\n");
        goto exit;
    }

    /* Allocate hw fence driver mem pool and share it with HYP */
    ret = hw_fence_utils_alloc_mem(drv_data);
    if (ret) {
        HWFNC_ERR("failed to alloc base memory\n");
        goto exit;
    }

    /* Initialize ctrl queue */
    ret = init_ctrl_queue(drv_data);
    if (ret)
        goto exit;

    ret = init_global_locks(drv_data);
    if (ret)
        goto exit;
    HWFNC_DBG_INIT("Locks allocated at 0x%pK total locks:%d\n", drv_data->client_lock_tbl,
        drv_data->client_lock_tbl_cnt);

    /* Initialize hw fences table */
    ret = init_hw_fences_table(drv_data);
    if (ret)
        goto exit;

    /* Map ipcc registers */
    ret = hw_fence_utils_map_ipcc(drv_data);
    if (ret) {
        HWFNC_ERR("ipcc regs mapping failed\n");
        goto exit;
    }

    /* Map time register */
    ret = hw_fence_utils_map_qtime(drv_data);
    if (ret) {
        HWFNC_ERR("qtime reg mapping failed\n");
        goto exit;
    }

    /* Map ctl_start registers */
    ret = hw_fence_utils_map_ctl_start(drv_data);
    if (ret) {
        /* This is not a fatal error, since platforms with dpu-ipc
         * won't use this option
         */
        HWFNC_WARN("no ctl_start regs, won't trigger the frame\n");
    }

    /* Init debugfs */
    ret = hw_fence_debug_debugfs_register(drv_data);
    if (ret) {
        HWFNC_ERR("debugfs init failed\n");
        goto exit;
    }

    /* Init vIRQ from VM */
    ret = hw_fence_utils_init_virq(drv_data);
    if (ret) {
        HWFNC_ERR("failed to init virq\n");
        goto exit;
    }

    mem = drv_data->io_mem_base;
    HWFNC_DBG_H("memory ptr:0x%pK val:0x%x\n", mem, *mem);

    HWFNC_DBG_INIT("HW Fences Table Initialized: 0x%pK cnt:%d\n",
        drv_data->hw_fences_tbl, drv_data->hw_fences_tbl_cnt);

exit:
    return ret;
}

int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    struct msm_hw_fence_mem_addr *mem_descriptor)
{
    int ret;

    /* Init client queues */
    ret = init_hw_fences_queues(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
        &hw_fence_client->mem_descriptor, hw_fence_client->queues,
        HW_FENCE_CLIENT_QUEUES, hw_fence_client->client_id);
    if (ret) {
        HWFNC_ERR("Failure to init the queue for client:%d\n",
            hw_fence_client->client_id);
        goto exit;
    }

    /* Init client memory descriptor */
    memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
        sizeof(struct msm_hw_fence_mem_addr));

exit:
    return ret;
}

int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client)
{
    int ret = 0;

    /*
     * Initialize IPCC Signals for this client
     *
     * NOTE: For each Client HW-Core, the client driver might be the one doing
     * its own initialization (in case any hw-sequence must be enforced);
     * however, if that is not the case, any per-client ipcc init to enable
     * the signaling can go here.
     */
    switch (hw_fence_client->client_id) {
    case HW_FENCE_CLIENT_ID_CTX0:
        /* nothing to initialize for gpu client */
        break;
#if IS_ENABLED(CONFIG_DEBUG_FS)
    case HW_FENCE_CLIENT_ID_VAL0:
    case HW_FENCE_CLIENT_ID_VAL1:
    case HW_FENCE_CLIENT_ID_VAL2:
    case HW_FENCE_CLIENT_ID_VAL3:
    case HW_FENCE_CLIENT_ID_VAL4:
    case HW_FENCE_CLIENT_ID_VAL5:
    case HW_FENCE_CLIENT_ID_VAL6:
        /* nothing to initialize for validation clients */
        break;
#endif /* CONFIG_DEBUG_FS */
    case HW_FENCE_CLIENT_ID_CTL0:
    case HW_FENCE_CLIENT_ID_CTL1:
    case HW_FENCE_CLIENT_ID_CTL2:
    case HW_FENCE_CLIENT_ID_CTL3:
    case HW_FENCE_CLIENT_ID_CTL4:
    case HW_FENCE_CLIENT_ID_CTL5:
#ifdef HW_DPU_IPCC
        /* initialize ipcc signals for dpu clients */
        HWFNC_DBG_H("init_controller_signal: DPU client:%d initialized:%d\n",
            hw_fence_client->client_id, drv_data->ipcc_dpu_initialized);
        if (!drv_data->ipcc_dpu_initialized) {
            drv_data->ipcc_dpu_initialized = true;

            /* Init dpu client ipcc signal */
            hw_fence_ipcc_enable_dpu_signaling(drv_data);
        }
#endif /* HW_DPU_IPCC */
        break;
    default:
        HWFNC_ERR("Unexpected client:%d\n", hw_fence_client->client_id);
        ret = -EINVAL;
        break;
    }

    return ret;
}

int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client)
{
    /*
     * Initialize Fence Controller resources for this Client.
     * Here we need to use the CTRL queue to communicate to the Fence
     * Controller the shared memory for the Rx/Tx queue for this client,
     * as well as any information that the Fence Controller might need to
     * know for this client.
     *
     * NOTE: For now, we are doing a static allocation of the
     * client's queues, so currently we don't need any notification
     * to the Fence CTL here through the CTRL queue.
     * Later on we might need it, once the PVM to SVM (and vice versa)
     * communication for initialization is supported.
     */
    return 0;
}

void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client)
{
    /*
     * Deallocate any resource allocated for this client.
     * If the fence controller was notified about the existence of this client,
     * we will need to notify the fence controller that this client is gone.
     *
     * NOTE: Since currently we are using 'fixed' memory for the client queues,
     * we don't need any notification to the Fence Controller yet; however, if
     * the memory allocation changes from 'fixed' to dynamic, then we will need
     * to notify the FenceCTL here about the client that is going away.
     */
    mutex_lock(&drv_data->clients_register_lock);
    drv_data->clients[hw_fence_client->client_id] = NULL;
    mutex_unlock(&drv_data->clients_register_lock);

    /* Deallocate client's object */
    HWFNC_DBG_LUT("freeing client_id:%d\n", hw_fence_client->client_id);
    kfree(hw_fence_client);
}

static inline int _calculate_hash(u32 table_total_entries, u64 context, u64 seqno,
    u64 step, u64 *hash)
{
    u64 m_size = table_total_entries;
    int val = 0;

    if (step == 0) {
        u64 a_multiplier = HW_FENCE_HASH_A_MULT;
        u64 c_multiplier = HW_FENCE_HASH_C_MULT;
        u64 b_multiplier = context + (context - 1); /* odd multiplier */

        /*
         * if m is a power of 2, we could optimize with a right shift;
         * for now we don't, to avoid assuming a power of two
         */
        *hash = (a_multiplier * seqno * b_multiplier + (c_multiplier * context)) % m_size;
    } else {
        if (step >= m_size) {
            /*
             * If we already traversed the whole table, return failure, since this means
             * there are no available spots; the table is either full or full enough
             * that we couldn't find an available spot after traversing the whole table.
             * Ideally the table shouldn't be so full that we cannot find a value after
             * some iterations, so this maximum step size could be optimized to fail
             * earlier.
             */
            HWFNC_ERR("Fence Table traversed and no available space!\n");
            val = -EINVAL;
        } else {
            /*
             * Linearly increment the hash value to find the next element in the table.
             * Note that this relies on the 'scrambled' data from the original hash.
             * Also, use a mod division to wrap around in case we reached the end of
             * the table.
             */
            *hash = (*hash + 1) % m_size;
        }
    }

    return val;
}
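
/*
 * Probe sequence, restated (with A = HW_FENCE_HASH_A_MULT,
 * C = HW_FENCE_HASH_C_MULT, m = table_total_entries):
 *
 *   step 0:   hash = (A * seqno * (2*context - 1) + C * context) mod m
 *   step k:   hash = (hash + 1) mod m        for k >= 1 (linear probing)
 *
 * (2*context - 1) is the odd b_multiplier computed above; the lookup gives
 * up once step reaches m, i.e. after one full pass over the table.
 */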

static inline struct msm_hw_fence *_get_hw_fence(u32 table_total_entries,
    struct msm_hw_fence *hw_fences_tbl,
    u64 hash)
{
    if (hash >= table_total_entries) {
        HWFNC_ERR("hash:%llu out of max range:%u\n",
            hash, table_total_entries);
        return NULL;
    }

    return &hw_fences_tbl[hash];
}

static bool _is_hw_fence_free(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
    /* If valid is set, the hw fence is not free */
    return !hw_fence->valid;
}

static bool _hw_fence_match(struct msm_hw_fence *hw_fence, u64 context, u64 seqno)
{
    return hw_fence->ctx_id == context && hw_fence->seq_id == seqno;
}

/* clears everything but the 'valid' field */
static void _cleanup_hw_fence(struct msm_hw_fence *hw_fence)
{
    int i;

    hw_fence->error = 0;
    wmb(); /* update memory to avoid mem-abort */
    hw_fence->ctx_id = 0;
    hw_fence->seq_id = 0;
    hw_fence->wait_client_mask = 0;
    hw_fence->fence_allocator = 0;
    hw_fence->fence_signal_client = 0;
    hw_fence->flags = 0;
    hw_fence->fence_create_time = 0;
    hw_fence->fence_trigger_time = 0;
    hw_fence->fence_wait_time = 0;
    hw_fence->debug_refcount = 0;
    hw_fence->parents_cnt = 0;
    hw_fence->pending_child_cnt = 0;

    for (i = 0; i < MSM_HW_FENCE_MAX_JOIN_PARENTS; i++)
        hw_fence->parent_list[i] = HW_FENCE_INVALID_PARENT_FENCE;

    memset(hw_fence->client_data, 0, sizeof(hw_fence->client_data));
}

/* This function must be called with the hw fence lock */
static void _reserve_hw_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fence, u32 client_id,
    u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
    _cleanup_hw_fence(hw_fence);

    /* reserve this HW fence */
    hw_fence->valid = 1;
    hw_fence->ctx_id = context;
    hw_fence->seq_id = seqno;
    hw_fence->flags = 0; /* fence just reserved, there shouldn't be any flags set */
    hw_fence->fence_allocator = client_id;
    hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
    hw_fence->debug_refcount++;

    HWFNC_DBG_LUT("Reserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
        client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _unreserve_hw_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fence, u32 client_id,
    u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
    _cleanup_hw_fence(hw_fence);

    /* unreserve this HW fence */
    hw_fence->valid = 0;

    HWFNC_DBG_LUT("Unreserved fence client:%d ctx:%llu seq:%llu hash:%u\n",
        client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _reserve_join_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fence, u32 client_id, u64 context,
    u64 seqno, u32 hash, u32 pending_child_cnt)
{
    _cleanup_hw_fence(hw_fence);

    /* reserve this HW fence */
    hw_fence->valid = true;
    hw_fence->ctx_id = context;
    hw_fence->seq_id = seqno;
    hw_fence->fence_allocator = client_id;
    hw_fence->fence_create_time = hw_fence_get_qtime(drv_data);
    hw_fence->debug_refcount++;
    hw_fence->pending_child_cnt = pending_child_cnt;

    HWFNC_DBG_LUT("Reserved join fence client:%d ctx:%llu seq:%llu hash:%u\n",
        client_id, context, seqno, hash);
}

/* This function must be called with the hw fence lock */
static void _fence_found(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fence, u32 client_id,
    u64 context, u64 seqno, u32 hash, u32 pending_child_cnt)
{
    /*
     * Do nothing: when this find-fence fn is invoked, all processing is done
     * outside. Currently just keeping this function for debugging purposes;
     * it can be removed in final versions.
     */
    HWFNC_DBG_LUT("Found fence client:%d ctx:%llu seq:%llu hash:%u\n",
        client_id, context, seqno, hash);
}

char *_get_op_mode(enum hw_fence_lookup_ops op_code)
{
    switch (op_code) {
    case HW_FENCE_LOOKUP_OP_CREATE:
        return "CREATE";
    case HW_FENCE_LOOKUP_OP_DESTROY:
        return "DESTROY";
    case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
        return "CREATE_JOIN";
    case HW_FENCE_LOOKUP_OP_FIND_FENCE:
        return "FIND_FENCE";
    default:
        return "UNKNOWN";
    }
}

struct msm_hw_fence *_hw_fence_lookup_and_process(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fences_tbl, u64 context, u64 seqno, u32 client_id,
    u32 pending_child_cnt, enum hw_fence_lookup_ops op_code, u64 *hash)
{
    bool (*compare_fnc)(struct msm_hw_fence *hfence, u64 context, u64 seqno);
    void (*process_fnc)(struct hw_fence_driver_data *drv_data, struct msm_hw_fence *hfence,
        u32 client_id, u64 context, u64 seqno, u32 hash, u32 pending);
    struct msm_hw_fence *hw_fence = NULL;
    u64 step = 0;
    int ret = 0;
    bool hw_fence_found = false;

    if (!hash || !drv_data || !hw_fences_tbl) {
        HWFNC_ERR("Invalid input for hw_fence_lookup\n");
        return NULL;
    }

    *hash = ~0;
    HWFNC_DBG_LUT("hw_fence_lookup: %d\n", op_code);

    switch (op_code) {
    case HW_FENCE_LOOKUP_OP_CREATE:
        compare_fnc = &_is_hw_fence_free;
        process_fnc = &_reserve_hw_fence;
        break;
    case HW_FENCE_LOOKUP_OP_DESTROY:
        compare_fnc = &_hw_fence_match;
        process_fnc = &_unreserve_hw_fence;
        break;
    case HW_FENCE_LOOKUP_OP_CREATE_JOIN:
        compare_fnc = &_is_hw_fence_free;
        process_fnc = &_reserve_join_fence;
        break;
    case HW_FENCE_LOOKUP_OP_FIND_FENCE:
        compare_fnc = &_hw_fence_match;
        process_fnc = &_fence_found;
        break;
    default:
        HWFNC_ERR("Unknown op code:%d\n", op_code);
        return NULL;
    }

    while (!hw_fence_found && (step < drv_data->hw_fence_table_entries)) {
        /* Calculate the Hash for the Fence */
        ret = _calculate_hash(drv_data->hw_fence_table_entries, context, seqno, step, hash);
        if (ret) {
            HWFNC_ERR("error calculating hash ctx:%llu seqno:%llu hash:%llu\n",
                context, seqno, *hash);
            break;
        }
        HWFNC_DBG_LUT("calculated hash:%llu [ctx:%llu seqno:%llu]\n", *hash, context,
            seqno);

        /* Get element from the table using the hash */
        hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, *hash);
        HWFNC_DBG_LUT("hw_fence_tbl:0x%pK hw_fence:0x%pK, hash:%llu valid:0x%x\n",
            hw_fences_tbl, hw_fence, *hash, hw_fence ? hw_fence->valid : 0xbad);
        if (!hw_fence) {
            HWFNC_ERR("bad hw fence ctx:%llu seqno:%llu hash:%llu\n",
                context, seqno, *hash);
            break;
        }

        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1);

        /* compare to either find a free fence or find an allocated fence */
        if (compare_fnc(hw_fence, context, seqno)) {
            /* Process the hw fence found by the algorithm */
            if (process_fnc) {
                process_fnc(drv_data, hw_fence, client_id, context, seqno, *hash,
                    pending_child_cnt);

                /* update memory table with processing */
                wmb();
            }
            HWFNC_DBG_L("client_id:%u op:%s ctx:%llu seqno:%llu hash:%llu step:%llu\n",
                client_id, _get_op_mode(op_code), context, seqno, *hash, step);

            hw_fence_found = true;
        } else {
            if ((op_code == HW_FENCE_LOOKUP_OP_CREATE ||
                    op_code == HW_FENCE_LOOKUP_OP_CREATE_JOIN) &&
                    seqno == hw_fence->seq_id && context == hw_fence->ctx_id) {
                /* ctx & seqno must be unique when creating a hw-fence */
                HWFNC_ERR("cannot create hw fence with same ctx:%llu seqno:%llu\n",
                    context, seqno);
                GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);
                break;
            }
            /* compare can fail if we have a collision; we will linearly resolve it */
            HWFNC_DBG_H("compare failed for hash:%llu [ctx:%llu seqno:%llu]\n", *hash,
                context, seqno);
        }

        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0);

        /* Increment step for the next loop */
        step++;
    }

    /* If we iterated through the whole list and didn't find the fence, return null */
    if (!hw_fence_found) {
        HWFNC_ERR("fail to create hw-fence step:%llu\n", step);
        hw_fence = NULL;
    }

    HWFNC_DBG_LUT("lookup:%d hw_fence:%pK ctx:%llu seqno:%llu hash:%llu flags:0x%llx\n",
        op_code, hw_fence, context, seqno, *hash, hw_fence ? hw_fence->flags : -1);

    return hw_fence;
}
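
/*
 * Lookup-op summary (derived from the switch above): each op pairs a compare
 * function with a process function over the same probe loop:
 *
 *   CREATE       _is_hw_fence_free -> _reserve_hw_fence
 *   DESTROY      _hw_fence_match   -> _unreserve_hw_fence
 *   CREATE_JOIN  _is_hw_fence_free -> _reserve_join_fence
 *   FIND_FENCE   _hw_fence_match   -> _fence_found
 *
 * "create" ops probe for a free slot, while "destroy"/"find" probe for the
 * slot whose ctx/seqno match; each candidate slot is inspected under its
 * per-fence lock.
 */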

int hw_fence_create(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    u64 context, u64 seqno, u64 *hash)
{
    u32 client_id = hw_fence_client->client_id;
    struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
    int ret = 0;

    /* allocate hw fence in table */
    if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
            context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_CREATE, hash)) {
        HWFNC_ERR("Fail to create fence client:%u ctx:%llu seqno:%llu\n",
            client_id, context, seqno);
        ret = -EINVAL;
    }

    return ret;
}

static inline int _hw_fence_cleanup(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fences_tbl, u32 client_id, u64 context, u64 seqno)
{
    u64 hash;

    if (!_hw_fence_lookup_and_process(drv_data, hw_fences_tbl,
            context, seqno, client_id, 0, HW_FENCE_LOOKUP_OP_DESTROY, &hash))
        return -EINVAL;

    return 0;
}

int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    u64 context, u64 seqno)
{
    u32 client_id = hw_fence_client->client_id;
    struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
    int ret = 0;

    /* remove hw fence from table */
    if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno)) {
        HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu\n",
            client_id, context, seqno);
        ret = -EINVAL;
    }

    return ret;
}

int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, u64 hash)
{
    u32 client_id = hw_fence_client->client_id;
    struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
    struct msm_hw_fence *hw_fence = NULL;
    int ret = 0;

    hw_fence = _get_hw_fence(drv_data->hw_fence_table_entries, hw_fences_tbl, hash);
    if (!hw_fence) {
        HWFNC_ERR("bad hw fence hash:%llu client:%u\n", hash, client_id);
        return -EINVAL;
    }

    if (hw_fence->fence_allocator != client_id) {
        HWFNC_ERR("client:%u cannot destroy fence hash:%llu fence_allocator:%u\n",
            client_id, hash, hw_fence->fence_allocator);
        return -EINVAL;
    }

    /* remove hw fence from table */
    if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, hw_fence->ctx_id,
            hw_fence->seq_id)) {
        HWFNC_ERR("Fail destroying fence client:%u ctx:%llu seqno:%llu hash:%llu\n",
            client_id, hw_fence->ctx_id, hw_fence->seq_id, hash);
        ret = -EINVAL;
    }

    return ret;
}
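
/*
 * Usage sketch (hypothetical caller, not from this file): a client allocates
 * a fence slot and later releases it either by ctx/seqno or by the hash
 * returned at creation time:
 *
 *   u64 hash;
 *
 *   if (!hw_fence_create(drv_data, client, ctx, seqno, &hash)) {
 *       // ...hash now indexes the fence in the global table...
 *       hw_fence_destroy_with_hash(drv_data, client, hash);
 *   }
 *
 * Destroy-by-hash skips the probe loop but checks fence_allocator, so only
 * the client that created the fence can tear it down this way.
 */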

static struct msm_hw_fence *_hw_fence_process_join_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    struct dma_fence_array *array, u64 *hash, bool create)
{
    struct msm_hw_fence *hw_fences_tbl;
    struct msm_hw_fence *join_fence = NULL;
    u64 context, seqno;
    u32 client_id, pending_child_cnt;

    /*
     * NOTE: For now we are allocating the join fences from the same table as all
     * the other fences (i.e. drv_data->hw_fences_tbl); functionally this will work,
     * however, it might impact the lookup algorithm, since the "join-fences" are
     * created with the context and seqno of a fence-array, and those might not be
     * changing by the client, so this will linearly increment the look-up and very
     * likely impact the other fences if these join-fences start to fill up a
     * particular region of the fences global table.
     * So we might have to allocate a different table altogether for these join
     * fences. However, to do this, just alloc another table and change it here:
     */
    hw_fences_tbl = drv_data->hw_fences_tbl;
    context = array->base.context;
    seqno = array->base.seqno;
    pending_child_cnt = array->num_fences;
    client_id = HW_FENCE_JOIN_FENCE_CLIENT_ID;

    if (create) {
        /* allocate the fence */
        join_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
            seqno, client_id, pending_child_cnt, HW_FENCE_LOOKUP_OP_CREATE_JOIN, hash);
        if (!join_fence)
            HWFNC_ERR("Fail to create join fence client:%u ctx:%llu seqno:%llu\n",
                client_id, context, seqno);
    } else {
        /* destroy the fence */
        if (_hw_fence_cleanup(drv_data, hw_fences_tbl, client_id, context, seqno))
            HWFNC_ERR("Fail destroying join fence client:%u ctx:%llu seqno:%llu\n",
                client_id, context, seqno);
    }

    return join_fence;
}

struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    u64 context, u64 seqno, u64 *hash)
{
    struct msm_hw_fence *hw_fences_tbl = drv_data->hw_fences_tbl;
    struct msm_hw_fence *hw_fence;
    u32 client_id = hw_fence_client ? hw_fence_client->client_id : 0xff;

    /* find the hw fence */
    hw_fence = _hw_fence_lookup_and_process(drv_data, hw_fences_tbl, context,
        seqno, client_id, 0, HW_FENCE_LOOKUP_OP_FIND_FENCE, hash);
    if (!hw_fence)
        HWFNC_ERR("Fail to find hw fence client:%u ctx:%llu seqno:%llu\n",
            client_id, context, seqno);

    return hw_fence;
}

static void _fence_ctl_signal(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
    u64 flags, u64 client_data, u32 error)
{
    u32 tx_client_id = drv_data->ipcc_client_pid; /* phys id for tx client */
    u32 rx_client_id = hw_fence_client->ipc_client_vid; /* virt id for rx client */

    HWFNC_DBG_H("We must signal the client now! hfence hash:%llu\n", hash);

    /* Write to Rx queue */
    if (hw_fence_client->update_rxq)
        hw_fence_update_queue(drv_data, hw_fence_client, hw_fence->ctx_id,
            hw_fence->seq_id, hash, flags, client_data, error, HW_FENCE_RX_QUEUE - 1);

    /* Signal the hw fence now */
    if (hw_fence_client->send_ipc)
        hw_fence_ipcc_trigger_signal(drv_data, tx_client_id, rx_client_id,
            hw_fence_client->ipc_signal_id);
}

static void _cleanup_join_and_child_fences(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, int iteration, struct dma_fence_array *array,
    struct msm_hw_fence *join_fence, u64 hash_join_fence)
{
    struct dma_fence *child_fence;
    struct msm_hw_fence *hw_fence_child;
    int idx, j;
    u64 hash = 0;

    /* cleanup the child-fences from the parent join-fence */
    for (idx = iteration; idx >= 0; idx--) {
        child_fence = array->fences[idx];

        hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
            child_fence->seqno, &hash);
        if (!hw_fence_child) {
            HWFNC_ERR("Cannot cleanup child fence context:%llu seqno:%llu hash:%llu\n",
                child_fence->context, child_fence->seqno, hash);

            /*
             * ideally this should not have happened, but if it did, try to keep
             * cleaning up other fences after printing the error
             */
            continue;
        }

        /* lock the child while we clean it up from the parent join-fence */
        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
        for (j = hw_fence_child->parents_cnt; j > 0; j--) {
            if (j > MSM_HW_FENCE_MAX_JOIN_PARENTS) {
                HWFNC_ERR("Invalid max parents_cnt:%d, will reset to max:%d\n",
                    hw_fence_child->parents_cnt, MSM_HW_FENCE_MAX_JOIN_PARENTS);
                j = MSM_HW_FENCE_MAX_JOIN_PARENTS;
            }

            if (hw_fence_child->parent_list[j - 1] == hash_join_fence) {
                hw_fence_child->parent_list[j - 1] = HW_FENCE_INVALID_PARENT_FENCE;

                if (hw_fence_child->parents_cnt)
                    hw_fence_child->parents_cnt--;

                /* update memory for the table update */
                wmb();
            }
        }
        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
    }

    /* destroy join fence */
    _hw_fence_process_join_fence(drv_data, hw_fence_client, array, &hash_join_fence,
        false);
}

int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, struct dma_fence_array *array,
    u64 *hash_join_fence, u64 client_data)
{
    struct msm_hw_fence *join_fence;
    struct msm_hw_fence *hw_fence_child;
    struct dma_fence *child_fence;
    bool signal_join_fence = false;
    u64 hash;
    int i, ret = 0;
    enum hw_fence_client_data_id data_id;

    if (client_data) {
        data_id = hw_fence_get_client_data_id(hw_fence_client->client_id);
        if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
            HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n",
                client_data, hw_fence_client->client_id);
            return -EINVAL;
        }
    }

    /*
     * Create the join fence from the join-fences table.
     * This function initializes:
     * join_fence->pending_child_count = array->num_fences
     */
    join_fence = _hw_fence_process_join_fence(drv_data, hw_fence_client, array,
        hash_join_fence, true);
    if (!join_fence) {
        HWFNC_ERR("cannot alloc hw fence for join fence array\n");
        return -EINVAL;
    }

    /* update this as waiting client of the join-fence */
    GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
    join_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
    GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */

    /* Iterate through fences of the array */
    for (i = 0; i < array->num_fences; i++) {
        child_fence = array->fences[i];

        /* Nested fence-arrays are not supported */
        if (to_dma_fence_array(child_fence)) {
            HWFNC_ERR("This is a nested fence, fail!\n");
            ret = -EINVAL;
            goto error_array;
        }

        /* All elements in the fence-array must be hw-fences */
        if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &child_fence->flags)) {
            HWFNC_ERR("DMA Fence in FenceArray is not a HW Fence\n");
            ret = -EINVAL;
            goto error_array;
        }

        /* Find the HW Fence in the Global Table */
        hw_fence_child = msm_hw_fence_find(drv_data, hw_fence_client, child_fence->context,
            child_fence->seqno, &hash);
        if (!hw_fence_child) {
            HWFNC_ERR("Cannot find child fence context:%llu seqno:%llu hash:%llu\n",
                child_fence->context, child_fence->seqno, hash);
            ret = -EINVAL;
            goto error_array;
        }

        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 1); /* lock */
        if (hw_fence_child->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
            /* child fence is already signaled */
            GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 1); /* lock */
            if (--join_fence->pending_child_cnt == 0)
                signal_join_fence = true;

            /* update memory for the table update */
            wmb();
            GLOBAL_ATOMIC_STORE(drv_data, &join_fence->lock, 0); /* unlock */
        } else {
            /* child fence is not signaled */
            hw_fence_child->parents_cnt++;

            if (hw_fence_child->parents_cnt >= MSM_HW_FENCE_MAX_JOIN_PARENTS
                    || hw_fence_child->parents_cnt < 1) {
                /* Max number of parents for a fence is exceeded */
                HWFNC_ERR("DMA Fence in FenceArray exceeds parents:%d\n",
                    hw_fence_child->parents_cnt);
                hw_fence_child->parents_cnt--;

                /* update memory for the table update */
                wmb();

                /* unlock */
                GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0);
                ret = -EINVAL;
                goto error_array;
            }

            hw_fence_child->parent_list[hw_fence_child->parents_cnt - 1] =
                *hash_join_fence;

            /* update memory for the table update */
            wmb();
        }
        GLOBAL_ATOMIC_STORE(drv_data, &hw_fence_child->lock, 0); /* unlock */
    }

    if (client_data)
        join_fence->client_data[data_id] = client_data;

    /* all fences were signaled, signal client now */
    if (signal_join_fence) {
        /* signal the join hw fence */
        _fence_ctl_signal(drv_data, hw_fence_client, join_fence, *hash_join_fence, 0,
            client_data, 0);
        set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &array->base.flags);

        /*
         * the job of the join-fence is finished since we already signaled,
         * so we can delete it now. This can happen when all the fences that
         * are part of the join-fence are already signaled.
         */
        _hw_fence_process_join_fence(drv_data, hw_fence_client, array, hash_join_fence,
            false);
    }

    return ret;

error_array:
    _cleanup_join_and_child_fences(drv_data, hw_fence_client, i, array, join_fence,
        *hash_join_fence);

    return -EINVAL;
}
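
/*
 * Join-fence lifecycle (summary of the function above): the join fence
 * starts with pending_child_cnt = array->num_fences. Each already-signaled
 * child decrements the count immediately; each unsignaled child instead
 * records the join fence in its parent_list so the Fence Controller can
 * decrement it on signal. If the count reaches zero while the array is being
 * processed (all children already signaled), the join fence is signaled and
 * destroyed on the spot; on any error the partially-built parent links are
 * rolled back via _cleanup_join_and_child_fences().
 */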

int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
    struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
    u64 seqno, u64 *hash, u64 client_data)
{
    struct msm_hw_fence *hw_fence;
    enum hw_fence_client_data_id data_id;

    if (client_data) {
        data_id = hw_fence_get_client_data_id(hw_fence_client->client_id);
        if (data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
            HWFNC_ERR("Populating non-zero client_data:%llu with invalid client:%d\n",
                client_data, hw_fence_client->client_id);
            return -EINVAL;
        }
    }

    /* find the hw fence within the table */
    hw_fence = msm_hw_fence_find(drv_data, hw_fence_client, context, seqno, hash);
    if (!hw_fence) {
        HWFNC_ERR("Cannot find fence!\n");
        return -EINVAL;
    }

    GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */

    /* register client in the hw fence */
    hw_fence->wait_client_mask |= BIT(hw_fence_client->client_id);
    hw_fence->fence_wait_time = hw_fence_get_qtime(drv_data);
    hw_fence->debug_refcount++;
    if (client_data)
        hw_fence->client_data[data_id] = client_data;

    /* update memory for the table update */
    wmb();
    GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

    /* if hw fence already signaled, signal the client */
    if (hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL) {
        if (fence != NULL)
            set_bit(MSM_HW_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
        _fence_ctl_signal(drv_data, hw_fence_client, hw_fence, *hash, 0, client_data, 0);
    }

    return 0;
}
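
/*
 * Usage sketch (hypothetical waiter, not from this file): a client that
 * wants to be signaled when an existing hw-fence retires registers itself
 * by ctx/seqno:
 *
 *   u64 hash;
 *   int ret = hw_fence_register_wait_client(drv_data, fence, client,
 *       fence->context, fence->seqno, &hash, 0);
 *
 * On success the client's bit is set in wait_client_mask; if the fence had
 * already signaled, _fence_ctl_signal() is invoked immediately so the waiter
 * is never missed. hw_fence_process_fence() below is the dma_fence-validating
 * wrapper around this call.
 */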

int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client,
    struct dma_fence *fence, u64 *hash, u64 client_data)
{
    int ret = 0;

    if (!drv_data || !hw_fence_client || !fence) {
        HWFNC_ERR("Invalid Input!\n");
        return -EINVAL;
    }

    /* fence must be a hw-fence */
    if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
        HWFNC_ERR("DMA Fence is not a HW Fence, flags:0x%llx\n", fence->flags);
        return -EINVAL;
    }

    ret = hw_fence_register_wait_client(drv_data, fence, hw_fence_client, fence->context,
        fence->seqno, hash, client_data);
    if (ret)
        HWFNC_ERR("Error registering for wait client:%d\n", hw_fence_client->client_id);

    return ret;
}

static void _signal_all_wait_clients(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence *hw_fence, u64 hash, int error)
{
    enum hw_fence_client_id wait_client_id;
    enum hw_fence_client_data_id data_id;
    struct msm_hw_fence_client *hw_fence_wait_client;
    u64 client_data = 0;

    /* signal all the waiting clients for this fence, passing along any error */
    for (wait_client_id = 0; wait_client_id < HW_FENCE_CLIENT_MAX; wait_client_id++) {
        if (hw_fence->wait_client_mask & BIT(wait_client_id)) {
            hw_fence_wait_client = drv_data->clients[wait_client_id];
            data_id = hw_fence_get_client_data_id(wait_client_id);

            if (data_id < HW_FENCE_MAX_CLIENTS_WITH_DATA)
                client_data = hw_fence->client_data[data_id];

            if (hw_fence_wait_client)
                _fence_ctl_signal(drv_data, hw_fence_wait_client, hw_fence,
                    hash, 0, client_data, error);
        }
    }
}

int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
    struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
    u32 reset_flags)
{
    int ret = 0;
    int error = (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_ERROR) ? 0 : MSM_HW_FENCE_ERROR_RESET;

    GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 1); /* lock */
    if (hw_fence->wait_client_mask & BIT(hw_fence_client->client_id)) {
        HWFNC_DBG_H("clearing client:%d wait bit for fence: ctx:%llu seqno:%llu\n",
            hw_fence_client->client_id, hw_fence->ctx_id,
            hw_fence->seq_id);
        hw_fence->wait_client_mask &= ~BIT(hw_fence_client->client_id);

        /* update memory for the table update */
        wmb();
    }
    GLOBAL_ATOMIC_STORE(drv_data, &hw_fence->lock, 0); /* unlock */

    if (hw_fence->fence_allocator == hw_fence_client->client_id) {
        /* if the fence is not signaled, signal with error all the waiting clients */
        if (!(hw_fence->flags & MSM_HW_FENCE_FLAG_SIGNAL))
            _signal_all_wait_clients(drv_data, hw_fence, hash, error);

        if (reset_flags & MSM_HW_FENCE_RESET_WITHOUT_DESTROY)
            goto skip_destroy;

        ret = hw_fence_destroy(drv_data, hw_fence_client,
            hw_fence->ctx_id, hw_fence->seq_id);
        if (ret) {
            HWFNC_ERR("Error destroying HW fence: ctx:%llu seqno:%llu\n",
                hw_fence->ctx_id, hw_fence->seq_id);
        }
    }

skip_destroy:
    return ret;
}

enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id)
{
    enum hw_fence_client_data_id data_id;

    switch (client_id) {
    case HW_FENCE_CLIENT_ID_CTX0:
        data_id = HW_FENCE_CLIENT_DATA_ID_CTX0;
        break;
    case HW_FENCE_CLIENT_ID_VAL0:
        data_id = HW_FENCE_CLIENT_DATA_ID_VAL0;
        break;
    case HW_FENCE_CLIENT_ID_VAL1:
        data_id = HW_FENCE_CLIENT_DATA_ID_VAL1;
        break;
    default:
        data_id = HW_FENCE_MAX_CLIENTS_WITH_DATA;
        break;
    }

    return data_id;
}