synx_util.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/slab.h>
  7. #include <linux/random.h>
  8. #include <linux/vmalloc.h>
  9. #include "synx_debugfs.h"
  10. #include "synx_util.h"
  11. #include "synx_private.h"
  12. extern void synx_external_callback(s32 sync_obj, int status, void *data);
  13. int synx_util_init_coredata(struct synx_coredata *synx_obj,
  14. struct synx_create_params *params,
  15. struct dma_fence_ops *ops,
  16. u64 dma_context)
  17. {
  18. int rc = -SYNX_INVALID;
  19. spinlock_t *fence_lock;
  20. struct dma_fence *fence;
  21. struct synx_fence_entry *entry;
  22. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
  23. IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
  24. return -SYNX_INVALID;
  25. if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
  26. *params->h_synx != 0) {
  27. rc = synx_global_get_ref(
  28. synx_util_global_idx(*params->h_synx));
  29. synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
  30. } else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
  31. rc = synx_alloc_global_handle(params->h_synx);
  32. synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
  33. } else {
  34. rc = synx_alloc_local_handle(params->h_synx);
  35. }
  36. if (rc != SYNX_SUCCESS)
  37. return rc;
  38. synx_obj->map_count = 1;
  39. synx_obj->num_bound_synxs = 0;
  40. synx_obj->type |= params->flags;
  41. kref_init(&synx_obj->refcount);
  42. mutex_init(&synx_obj->obj_lock);
  43. INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
  44. if (params->name)
  45. strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));
  46. if (params->flags & SYNX_CREATE_DMA_FENCE) {
  47. fence = (struct dma_fence *)params->fence;
  48. if (IS_ERR_OR_NULL(fence)) {
  49. dprintk(SYNX_ERR, "invalid external fence\n");
  50. goto free;
  51. }
  52. dma_fence_get(fence);
  53. synx_obj->fence = fence;
  54. } else {
  55. /*
  56. * lock and fence memory will be released in fence
  57. * release function
  58. */
  59. fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
  60. if (IS_ERR_OR_NULL(fence_lock)) {
  61. rc = -SYNX_NOMEM;
  62. goto free;
  63. }
  64. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  65. if (IS_ERR_OR_NULL(fence)) {
  66. kfree(fence_lock);
  67. rc = -SYNX_NOMEM;
  68. goto free;
  69. }
  70. spin_lock_init(fence_lock);
  71. dma_fence_init(fence, ops, fence_lock, dma_context, 1);
  72. synx_obj->fence = fence;
  73. synx_util_activate(synx_obj);
  74. dprintk(SYNX_MEM,
  75. "allocated backing fence %pK\n", fence);
  76. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  77. if (IS_ERR_OR_NULL(entry)) {
  78. rc = -SYNX_NOMEM;
  79. goto clean;
  80. }
  81. entry->key = (u64)fence;
  82. if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
  83. entry->g_handle = *params->h_synx;
  84. else
  85. entry->l_handle = *params->h_synx;
  86. rc = synx_util_insert_fence_entry(entry,
  87. params->h_synx,
  88. params->flags & SYNX_CREATE_GLOBAL_FENCE);
  89. BUG_ON(rc != SYNX_SUCCESS);
  90. }
  91. if (rc != SYNX_SUCCESS)
  92. goto clean;
  93. synx_obj->status = synx_util_get_object_status(synx_obj);
  94. return SYNX_SUCCESS;
  95. clean:
  96. dma_fence_put(fence);
  97. free:
  98. if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
  99. synx_global_put_ref(
  100. synx_util_global_idx(*params->h_synx));
  101. else
  102. clear_bit(synx_util_global_idx(*params->h_synx),
  103. synx_dev->native->bitmap);
  104. return rc;
  105. }
/*
 * Register the synx framework's signal callback on the object's backing
 * dma fence so that signals performed through native dma-fence operations
 * are routed back to synx.
 *
 * Takes a reference on @synx_obj for the lifetime of the callback; the
 * reference and the callback allocation are released on any failure path.
 *
 * Returns SYNX_SUCCESS on success (including the already-signaled local
 * case, see below), negative synx/errno value otherwise.
 */
int synx_util_add_callback(struct synx_coredata *synx_obj,
	u32 h_synx)
{
	int rc;
	struct synx_signal_cb *signal_cb;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
	if (IS_ERR_OR_NULL(signal_cb))
		return -SYNX_NOMEM;

	signal_cb->handle = h_synx;
	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
	signal_cb->synx_obj = synx_obj;
	/* get reference on synx coredata for signal cb */
	synx_util_get_object(synx_obj);
	/*
	 * adding callback enables synx framework to
	 * get notified on signal from clients using
	 * native dma fence operations.
	 */
	rc = dma_fence_add_callback(synx_obj->fence,
		&signal_cb->fence_cb, synx_fence_callback);
	if (rc != 0) {
		if (rc == -ENOENT) {
			/* -ENOENT: fence already signaled, no cb installed */
			if (synx_util_is_global_object(synx_obj)) {
				/* signal (if) global handle */
				rc = synx_global_update_status(
					synx_obj->global_idx,
					synx_util_get_object_status(synx_obj));
				if (rc != SYNX_SUCCESS)
					dprintk(SYNX_ERR,
						"status update of %u with fence %pK\n",
						synx_obj->global_idx, synx_obj->fence);
			} else {
				/* local object already signaled: not an error */
				rc = SYNX_SUCCESS;
			}
		} else {
			dprintk(SYNX_ERR,
				"error adding callback for %pK err %d\n",
				synx_obj->fence, rc);
		}
		/* drop the coredata reference taken above; cb not installed */
		synx_util_put_object(synx_obj);
		kfree(signal_cb);
		return rc;
	}

	synx_obj->signal_cb = signal_cb;
	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
		signal_cb, synx_obj->fence);
	return SYNX_SUCCESS;
}
/*
 * Count how many dma_fence_array parents have registered callbacks on the
 * given child @fence.
 *
 * Walks fence->cb_list and treats each registered callback as if it were
 * embedded in a struct dma_fence_array_cb, then checks whether the array
 * back-pointer refers to a real fence array.
 *
 * NOTE(review): the cb_list walk is done without holding fence->lock, and
 * container_of() is applied to callbacks that may not actually be
 * dma_fence_array_cb entries — the array back-pointer read is therefore a
 * heuristic. Presumably callers rely on all registered cbs being array cbs
 * in this driver's flows; confirm against callers.
 */
static int synx_util_count_dma_array_fences(struct dma_fence *fence)
{
	struct dma_fence_cb *cur, *tmp;
	int32_t num_dma_array = 0;
	struct dma_fence_array_cb *cb_array = NULL;
	struct dma_fence_array *array = NULL;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence passed\n");
		return num_dma_array;
	}

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		// count for parent fences
		cb_array = container_of(cur, struct dma_fence_array_cb, cb);
		if (IS_ERR_OR_NULL(cb_array)) {
			dprintk(SYNX_VERB, "cb_array not found in fence %pK\n", fence);
			continue;
		}
		array = cb_array->array;
		if (!IS_ERR_OR_NULL(array) && dma_fence_is_array(&(array->base)))
			num_dma_array++;
	}

	dprintk(SYNX_VERB, "number of fence_array found %d for child fence %pK\n",
		num_dma_array, fence);
	return num_dma_array;
}
  181. int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
  182. struct dma_fence **fences,
  183. struct synx_merge_params *params,
  184. u32 num_objs,
  185. u64 dma_context)
  186. {
  187. int rc;
  188. struct dma_fence_array *array;
  189. if (IS_ERR_OR_NULL(synx_obj))
  190. return -SYNX_INVALID;
  191. if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
  192. rc = synx_alloc_global_handle(params->h_merged_obj);
  193. synx_obj->global_idx =
  194. synx_util_global_idx(*params->h_merged_obj);
  195. } else {
  196. rc = synx_alloc_local_handle(params->h_merged_obj);
  197. }
  198. if (rc != SYNX_SUCCESS)
  199. return rc;
  200. array = dma_fence_array_create(num_objs, fences,
  201. dma_context, 1, false);
  202. if (IS_ERR_OR_NULL(array))
  203. return -SYNX_INVALID;
  204. synx_obj->fence = &array->base;
  205. synx_obj->map_count = 1;
  206. synx_obj->type = params->flags;
  207. synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
  208. synx_obj->num_bound_synxs = 0;
  209. kref_init(&synx_obj->refcount);
  210. mutex_init(&synx_obj->obj_lock);
  211. INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
  212. synx_obj->status = synx_util_get_object_status(synx_obj);
  213. synx_util_activate(synx_obj);
  214. return rc;
  215. }
  216. void synx_util_destroy_coredata(struct kref *kref)
  217. {
  218. int rc;
  219. struct synx_coredata *synx_obj =
  220. container_of(kref, struct synx_coredata, refcount);
  221. if (synx_util_is_global_object(synx_obj)) {
  222. rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
  223. if (rc)
  224. dprintk(SYNX_ERR, "Failed to clear subscribers");
  225. synx_global_put_ref(synx_obj->global_idx);
  226. }
  227. synx_util_object_destroy(synx_obj);
  228. }
/* Take a reference on the synx coredata object. */
void synx_util_get_object(struct synx_coredata *synx_obj)
{
	kref_get(&synx_obj->refcount);
}
/*
 * Drop a reference on the synx coredata object; the last put triggers
 * synx_util_destroy_coredata().
 */
void synx_util_put_object(struct synx_coredata *synx_obj)
{
	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
}
  237. int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status)
  238. {
  239. struct dma_fence_array *array = NULL;
  240. u32 i;
  241. int rc = 0;
  242. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
  243. return -SYNX_INVALID;
  244. if (dma_fence_is_array(synx_obj->fence)) {
  245. array = to_dma_fence_array(synx_obj->fence);
  246. if (IS_ERR_OR_NULL(array))
  247. return -SYNX_INVALID;
  248. for (i = 0; i < array->num_fences; i++) {
  249. if (kref_read(&array->fences[i]->refcount) == 1 &&
  250. __fence_state(array->fences[i], false) == SYNX_STATE_ACTIVE) {
  251. dma_fence_set_error(array->fences[i],
  252. -SYNX_STATE_SIGNALED_CANCEL);
  253. rc = dma_fence_signal(array->fences[i]);
  254. if (rc)
  255. dprintk(SYNX_ERR,
  256. "signaling child fence %pK failed=%d\n",
  257. array->fences[i], rc);
  258. }
  259. }
  260. }
  261. return rc;
  262. }
  263. void synx_util_object_destroy(struct synx_coredata *synx_obj)
  264. {
  265. int rc;
  266. int num_dma_array = 0;
  267. u32 i;
  268. s32 sync_id;
  269. u32 type;
  270. unsigned long flags;
  271. struct synx_cb_data *synx_cb, *synx_cb_temp;
  272. struct synx_bind_desc *bind_desc;
  273. struct bind_operations *bind_ops;
  274. struct synx_external_data *data;
  275. /* clear all the undispatched callbacks */
  276. list_for_each_entry_safe(synx_cb,
  277. synx_cb_temp, &synx_obj->reg_cbs_list, node) {
  278. dprintk(SYNX_ERR,
  279. "dipatching un-released callbacks of session %pK\n",
  280. synx_cb->session);
  281. synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
  282. if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
  283. dprintk(SYNX_VERB,
  284. "Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
  285. synx_cb, synx_cb->timeout);
  286. del_timer(&synx_cb->synx_timer);
  287. }
  288. list_del_init(&synx_cb->node);
  289. queue_work(synx_dev->wq_cb,
  290. &synx_cb->cb_dispatch);
  291. dprintk(SYNX_VERB, "dispatched callback for fence %pKn", synx_obj->fence);
  292. }
  293. for (i = 0; i < synx_obj->num_bound_synxs; i++) {
  294. bind_desc = &synx_obj->bound_synxs[i];
  295. sync_id = bind_desc->external_desc.id;
  296. type = bind_desc->external_desc.type;
  297. data = bind_desc->external_data;
  298. bind_ops = synx_util_get_bind_ops(type);
  299. if (IS_ERR_OR_NULL(bind_ops)) {
  300. dprintk(SYNX_ERR,
  301. "bind ops fail id: %d, type: %u, err: %d\n",
  302. sync_id, type, rc);
  303. continue;
  304. }
  305. /* clear the hash table entry */
  306. synx_util_remove_data(&sync_id, type);
  307. rc = bind_ops->deregister_callback(
  308. synx_external_callback, data, sync_id);
  309. if (rc < 0) {
  310. dprintk(SYNX_ERR,
  311. "de-registration fail id: %d, type: %u, err: %d\n",
  312. sync_id, type, rc);
  313. continue;
  314. }
  315. /*
  316. * release the memory allocated for external data.
  317. * It is safe to release this memory
  318. * only if deregistration is successful.
  319. */
  320. kfree(data);
  321. }
  322. mutex_destroy(&synx_obj->obj_lock);
  323. synx_util_release_fence_entry((u64)synx_obj->fence);
  324. /* dma fence framework expects handles are signaled before release,
  325. * so signal if active handle and has last refcount. Synx handles
  326. * on other cores are still active to carry out usual callflow.
  327. */
  328. if (!IS_ERR_OR_NULL(synx_obj->fence)) {
  329. spin_lock_irqsave(synx_obj->fence->lock, flags);
  330. if (synx_util_is_merged_object(synx_obj) &&
  331. synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE)
  332. rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL);
  333. else if (synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE) {
  334. num_dma_array = synx_util_count_dma_array_fences(synx_obj->fence);
  335. if (kref_read(&synx_obj->fence->refcount) == 1 + num_dma_array) {
  336. // set fence error to cancel
  337. dma_fence_set_error(synx_obj->fence,
  338. -SYNX_STATE_SIGNALED_CANCEL);
  339. rc = dma_fence_signal_locked(synx_obj->fence);
  340. }
  341. }
  342. spin_unlock_irqrestore(synx_obj->fence->lock, flags);
  343. if (rc)
  344. dprintk(SYNX_ERR,
  345. "signaling fence %pK failed=%d\n",
  346. synx_obj->fence, rc);
  347. }
  348. dma_fence_put(synx_obj->fence);
  349. kfree(synx_obj);
  350. dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
  351. }
  352. long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
  353. {
  354. bool bit;
  355. long idx;
  356. do {
  357. idx = find_first_zero_bit(bitmap, size);
  358. if (idx >= size)
  359. break;
  360. bit = test_and_set_bit(idx, bitmap);
  361. } while (bit);
  362. return idx;
  363. }
  364. u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
  365. {
  366. u32 handle = 0;
  367. if (idx >= SYNX_MAX_OBJS)
  368. return 0;
  369. if (global_idx) {
  370. handle = 1;
  371. handle <<= SYNX_HANDLE_CORE_BITS;
  372. }
  373. handle |= core_id;
  374. handle <<= SYNX_HANDLE_INDEX_BITS;
  375. handle |= idx;
  376. return handle;
  377. }
  378. int synx_alloc_global_handle(u32 *new_synx)
  379. {
  380. int rc;
  381. u32 idx;
  382. rc = synx_global_alloc_index(&idx);
  383. if (rc != SYNX_SUCCESS)
  384. return rc;
  385. *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
  386. dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
  387. *new_synx, *new_synx);
  388. rc = synx_global_init_coredata(*new_synx);
  389. return rc;
  390. }
  391. int synx_alloc_local_handle(u32 *new_synx)
  392. {
  393. u32 idx;
  394. idx = synx_util_get_free_handle(synx_dev->native->bitmap,
  395. SYNX_MAX_OBJS);
  396. if (idx >= SYNX_MAX_OBJS)
  397. return -SYNX_NOMEM;
  398. *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
  399. dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
  400. *new_synx, *new_synx);
  401. return SYNX_SUCCESS;
  402. }
  403. int synx_util_init_handle(struct synx_client *client,
  404. struct synx_coredata *synx_obj, u32 *new_h_synx,
  405. void *map_entry)
  406. {
  407. int rc = SYNX_SUCCESS;
  408. bool found = false;
  409. struct synx_handle_coredata *synx_data, *curr;
  410. if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
  411. IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
  412. return -SYNX_INVALID;
  413. synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
  414. if (IS_ERR_OR_NULL(synx_data))
  415. return -SYNX_NOMEM;
  416. synx_data->client = client;
  417. synx_data->synx_obj = synx_obj;
  418. synx_data->key = *new_h_synx;
  419. synx_data->map_entry = map_entry;
  420. kref_init(&synx_data->refcount);
  421. synx_data->rel_count = 1;
  422. spin_lock_bh(&client->handle_map_lock);
  423. hash_for_each_possible(client->handle_map,
  424. curr, node, *new_h_synx) {
  425. if (curr->key == *new_h_synx) {
  426. if (curr->synx_obj != synx_obj) {
  427. rc = -SYNX_INVALID;
  428. dprintk(SYNX_ERR,
  429. "inconsistent data in handle map\n");
  430. } else {
  431. kref_get(&curr->refcount);
  432. curr->rel_count++;
  433. }
  434. found = true;
  435. break;
  436. }
  437. }
  438. if (unlikely(found))
  439. kfree(synx_data);
  440. else
  441. hash_add(client->handle_map,
  442. &synx_data->node, *new_h_synx);
  443. spin_unlock_bh(&client->handle_map_lock);
  444. return rc;
  445. }
  446. int synx_util_activate(struct synx_coredata *synx_obj)
  447. {
  448. if (IS_ERR_OR_NULL(synx_obj))
  449. return -SYNX_INVALID;
  450. /* move synx to ACTIVE state and register cb for merged object */
  451. dma_fence_enable_sw_signaling(synx_obj->fence);
  452. return 0;
  453. }
  454. static u32 synx_util_get_references(struct synx_coredata *synx_obj)
  455. {
  456. u32 count = 0;
  457. u32 i = 0;
  458. struct dma_fence_array *array = NULL;
  459. /* obtain dma fence reference */
  460. if (dma_fence_is_array(synx_obj->fence)) {
  461. array = to_dma_fence_array(synx_obj->fence);
  462. if (IS_ERR_OR_NULL(array))
  463. return 0;
  464. for (i = 0; i < array->num_fences; i++)
  465. dma_fence_get(array->fences[i]);
  466. count = array->num_fences;
  467. } else {
  468. dma_fence_get(synx_obj->fence);
  469. count = 1;
  470. }
  471. return count;
  472. }
  473. static void synx_util_put_references(struct synx_coredata *synx_obj)
  474. {
  475. u32 i = 0;
  476. struct dma_fence_array *array = NULL;
  477. if (dma_fence_is_array(synx_obj->fence)) {
  478. array = to_dma_fence_array(synx_obj->fence);
  479. if (IS_ERR_OR_NULL(array))
  480. return;
  481. for (i = 0; i < array->num_fences; i++)
  482. dma_fence_put(array->fences[i]);
  483. } else {
  484. dma_fence_put(synx_obj->fence);
  485. }
  486. }
  487. static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
  488. struct dma_fence **fences,
  489. u32 idx)
  490. {
  491. struct dma_fence_array *array = NULL;
  492. u32 i = 0;
  493. if (dma_fence_is_array(synx_obj->fence)) {
  494. array = to_dma_fence_array(synx_obj->fence);
  495. if (IS_ERR_OR_NULL(array))
  496. return 0;
  497. for (i = 0; i < array->num_fences; i++)
  498. fences[idx+i] = array->fences[i];
  499. return array->num_fences;
  500. }
  501. fences[idx] = synx_obj->fence;
  502. return 1;
  503. }
  504. static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
  505. {
  506. int i, j;
  507. u32 wr_idx = 1;
  508. if (IS_ERR_OR_NULL(arr)) {
  509. dprintk(SYNX_ERR, "invalid input array\n");
  510. return 0;
  511. }
  512. for (i = 1; i < num; i++) {
  513. for (j = 0; j < wr_idx ; j++) {
  514. if (arr[i] == arr[j]) {
  515. /* release reference obtained for duplicate */
  516. dprintk(SYNX_DBG,
  517. "releasing duplicate reference\n");
  518. dma_fence_put(arr[i]);
  519. break;
  520. }
  521. }
  522. if (j == wr_idx)
  523. arr[wr_idx++] = arr[i];
  524. }
  525. return wr_idx;
  526. }
/*
 * Undo the references taken during merge validation for the first
 * @num_objs handles in @h_synxs, releasing both the fence references and
 * the handle references.
 *
 * NOTE(review): when the object (or its fence) is invalid, the loop skips
 * synx_util_release_handle() for that entry — if the handle itself was
 * acquired successfully, its reference appears to leak here; confirm
 * against synx_util_acquire_handle()/obtain_object semantics.
 *
 * Returns 0; bad client/handle-array input yields -SYNX_INVALID.
 */
s32 synx_util_merge_error(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs)
{
	u32 i = 0;
	struct synx_handle_coredata *synx_data;
	struct synx_coredata *synx_obj;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
		return -SYNX_INVALID;

	for (i = 0; i < num_objs; i++) {
		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
		synx_obj = synx_util_obtain_object(synx_data);
		if (IS_ERR_OR_NULL(synx_obj) ||
			IS_ERR_OR_NULL(synx_obj->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in cleanup\n",
				client->id, h_synxs[i]);
			continue;
		}
		/* release all references obtained during merge validatation */
		synx_util_put_references(synx_obj);
		synx_util_release_handle(synx_data);
	}
	return 0;
}
/*
 * Validate a set of handles for merging and collect their (flattened,
 * de-duplicated) backing fences.
 *
 * On success, *fence_list points to a newly allocated fence array (freed
 * later by the caller) and *fence_cnt holds the unique fence count.
 *
 * Error handling: on failure *fence_cnt is transiently used to record how
 * many objects were successfully acquired, so the error path knows how
 * many references to roll back; it is reset to 0 before returning.
 *
 * Returns 0 on success, negative synx error otherwise.
 */
int synx_util_validate_merge(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs,
	struct dma_fence ***fence_list,
	u32 *fence_cnt)
{
	u32 count = 0;
	u32 i = 0;
	struct synx_handle_coredata **synx_datas;
	struct synx_coredata **synx_objs;
	struct dma_fence **fences = NULL;

	if (num_objs <= 1) {
		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
		return -SYNX_INVALID;
	}

	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_datas))
		return -SYNX_NOMEM;
	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_objs)) {
		kfree(synx_datas);
		return -SYNX_NOMEM;
	}

	/* acquire every handle and count the total (unflattened) fences */
	for (i = 0; i < num_objs; i++) {
		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
		if (IS_ERR_OR_NULL(synx_objs[i]) ||
			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in merge list\n",
				client->id, h_synxs[i]);
			/* record how many were acquired for rollback */
			*fence_cnt = i;
			goto error;
		}
		count += synx_util_get_references(synx_objs[i]);
	}

	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
	if (IS_ERR_OR_NULL(fences)) {
		/* all num_objs entries were acquired at this point */
		*fence_cnt = num_objs;
		goto error;
	}

	/* memory will be released later in the invoking function */
	*fence_list = fences;
	count = 0;
	for (i = 0; i < num_objs; i++) {
		count += synx_util_add_fence(synx_objs[i], fences, count);
		/* release the reference obtained earlier in the function */
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = synx_util_remove_duplicates(fences, count);
	kfree(synx_objs);
	kfree(synx_datas);
	return 0;

error:
	/* release the reference/s obtained earlier in the function */
	for (i = 0; i < *fence_cnt; i++) {
		synx_util_put_references(synx_objs[i]);
		synx_util_release_handle(synx_datas[i]);
	}
	*fence_cnt = 0;
	kfree(synx_objs);
	kfree(synx_datas);
	return -SYNX_INVALID;
}
  616. u32 __fence_state(struct dma_fence *fence, bool locked)
  617. {
  618. s32 status;
  619. u32 state = SYNX_STATE_INVALID;
  620. if (IS_ERR_OR_NULL(fence)) {
  621. dprintk(SYNX_ERR, "invalid fence\n");
  622. return SYNX_STATE_INVALID;
  623. }
  624. if (locked)
  625. status = dma_fence_get_status_locked(fence);
  626. else
  627. status = dma_fence_get_status(fence);
  628. /* convert fence status to synx state */
  629. switch (status) {
  630. case 0:
  631. state = SYNX_STATE_ACTIVE;
  632. break;
  633. case 1:
  634. state = SYNX_STATE_SIGNALED_SUCCESS;
  635. break;
  636. case -SYNX_STATE_SIGNALED_CANCEL:
  637. state = SYNX_STATE_SIGNALED_CANCEL;
  638. break;
  639. case -SYNX_STATE_SIGNALED_EXTERNAL:
  640. state = SYNX_STATE_SIGNALED_EXTERNAL;
  641. break;
  642. case -SYNX_STATE_SIGNALED_ERROR:
  643. state = SYNX_STATE_SIGNALED_ERROR;
  644. break;
  645. default:
  646. state = (u32)(-status);
  647. }
  648. return state;
  649. }
/*
 * Compute the aggregate synx state of a fence array.
 *
 * The group is ACTIVE while any child is active; otherwise the reported
 * state is parent_state, which tracks the per-child state seen while no
 * error had yet been counted (i.e. the last child state observed before
 * the first error child, or the final child's state if none errored).
 * Child states above SYNX_STATE_SIGNALED_MAX count as signaled, others as
 * errors.
 */
static u32 __fence_group_state(struct dma_fence *fence, bool locked)
{
	u32 i = 0;
	u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID;
	struct dma_fence_array *array = NULL;
	u32 intr, actv_cnt, sig_cnt, err_cnt;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	actv_cnt = sig_cnt = err_cnt = 0;
	array = to_dma_fence_array(fence);
	if (IS_ERR_OR_NULL(array))
		return SYNX_STATE_INVALID;

	for (i = 0; i < array->num_fences; i++) {
		intr = __fence_state(array->fences[i], locked);
		/* freeze parent_state once the first error has been seen */
		if (err_cnt == 0)
			parent_state = intr;
		switch (intr) {
		case SYNX_STATE_ACTIVE:
			actv_cnt++;
			break;
		case SYNX_STATE_SIGNALED_SUCCESS:
			sig_cnt++;
			break;
		default:
			intr > SYNX_STATE_SIGNALED_MAX ? sig_cnt++ : err_cnt++;
		}
	}

	dprintk(SYNX_DBG,
		"group cnt stats act:%u, sig: %u, err: %u\n",
		actv_cnt, sig_cnt, err_cnt);

	if (actv_cnt)
		state = SYNX_STATE_ACTIVE;
	else
		state = parent_state;
	return state;
}
  688. /*
  689. * WARN: Should not hold the fence spinlock when invoking
  690. * this function. Use synx_fence_state_locked instead
  691. */
  692. u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
  693. {
  694. u32 state;
  695. if (IS_ERR_OR_NULL(synx_obj))
  696. return SYNX_STATE_INVALID;
  697. if (synx_util_is_merged_object(synx_obj))
  698. state = __fence_group_state(synx_obj->fence, false);
  699. else
  700. state = __fence_state(synx_obj->fence, false);
  701. return state;
  702. }
  703. /* use this for status check when holding on to metadata spinlock */
  704. u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
  705. {
  706. u32 state;
  707. if (IS_ERR_OR_NULL(synx_obj))
  708. return SYNX_STATE_INVALID;
  709. if (synx_util_is_merged_object(synx_obj))
  710. state = __fence_group_state(synx_obj->fence, true);
  711. else
  712. state = __fence_state(synx_obj->fence, true);
  713. return state;
  714. }
  715. struct synx_handle_coredata *synx_util_acquire_handle(
  716. struct synx_client *client, u32 h_synx)
  717. {
  718. struct synx_handle_coredata *synx_data = NULL;
  719. struct synx_handle_coredata *synx_handle =
  720. ERR_PTR(-SYNX_NOENT);
  721. if (IS_ERR_OR_NULL(client))
  722. return ERR_PTR(-SYNX_INVALID);
  723. spin_lock_bh(&client->handle_map_lock);
  724. hash_for_each_possible(client->handle_map,
  725. synx_data, node, h_synx) {
  726. if (synx_data->key == h_synx &&
  727. synx_data->rel_count != 0) {
  728. kref_get(&synx_data->refcount);
  729. synx_handle = synx_data;
  730. break;
  731. }
  732. }
  733. spin_unlock_bh(&client->handle_map_lock);
  734. return synx_handle;
  735. }
  736. struct synx_map_entry *synx_util_insert_to_map(
  737. struct synx_coredata *synx_obj,
  738. u32 h_synx, u32 flags)
  739. {
  740. struct synx_map_entry *map_entry;
  741. map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
  742. if (IS_ERR_OR_NULL(map_entry))
  743. return ERR_PTR(-SYNX_NOMEM);
  744. kref_init(&map_entry->refcount);
  745. map_entry->synx_obj = synx_obj;
  746. map_entry->flags = flags;
  747. map_entry->key = h_synx;
  748. if (synx_util_is_global_handle(h_synx)) {
  749. spin_lock_bh(&synx_dev->native->global_map_lock);
  750. hash_add(synx_dev->native->global_map,
  751. &map_entry->node, h_synx);
  752. spin_unlock_bh(&synx_dev->native->global_map_lock);
  753. dprintk(SYNX_MEM,
  754. "added handle %u to global map %pK\n",
  755. h_synx, map_entry);
  756. } else {
  757. spin_lock_bh(&synx_dev->native->local_map_lock);
  758. hash_add(synx_dev->native->local_map,
  759. &map_entry->node, h_synx);
  760. spin_unlock_bh(&synx_dev->native->local_map_lock);
  761. dprintk(SYNX_MEM,
  762. "added handle %u to local map %pK\n",
  763. h_synx, map_entry);
  764. }
  765. return map_entry;
  766. }
  767. struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
  768. {
  769. struct synx_map_entry *curr;
  770. struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);
  771. if (h_synx == 0)
  772. return ERR_PTR(-SYNX_INVALID);
  773. if (synx_util_is_global_handle(h_synx)) {
  774. spin_lock_bh(&synx_dev->native->global_map_lock);
  775. hash_for_each_possible(synx_dev->native->global_map,
  776. curr, node, h_synx) {
  777. if (curr->key == h_synx) {
  778. kref_get(&curr->refcount);
  779. map_entry = curr;
  780. break;
  781. }
  782. }
  783. spin_unlock_bh(&synx_dev->native->global_map_lock);
  784. } else {
  785. spin_lock_bh(&synx_dev->native->local_map_lock);
  786. hash_for_each_possible(synx_dev->native->local_map,
  787. curr, node, h_synx) {
  788. if (curr->key == h_synx) {
  789. kref_get(&curr->refcount);
  790. map_entry = curr;
  791. break;
  792. }
  793. }
  794. spin_unlock_bh(&synx_dev->native->local_map_lock);
  795. }
  796. /* should we allocate if entry not found? */
  797. return map_entry;
  798. }
/*
 * synx_util_cleanup_fence - drop one map reference from @synx_obj and,
 * if that was the last one while the fence is still active, detach or
 * re-target its pending signal callback.
 *
 * Called from the map-entry destroy worker; takes synx_obj->obj_lock
 * itself for the whole operation.
 */
static void synx_util_cleanup_fence(
	struct synx_coredata *synx_obj)
{
	struct synx_signal_cb *signal_cb;
	unsigned long flags;
	u32 g_status;
	u32 f_status;
	u32 h_synx = 0;

	mutex_lock(&synx_obj->obj_lock);
	synx_obj->map_count--;
	signal_cb = synx_obj->signal_cb;
	f_status = synx_util_get_object_status(synx_obj);
	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
	/*
	 * Global-handle case: last local map reference gone, fence still
	 * active and a signal cb is pending.
	 */
	if (synx_obj->map_count == 0 &&
		(signal_cb != NULL) &&
		(synx_obj->global_idx != 0) &&
		(f_status == SYNX_STATE_ACTIVE)) {
		/*
		 * no more clients interested for notification
		 * on handle on local core.
		 * remove reference held by callback on synx
		 * coredata structure and update cb (if still
		 * un-signaled) with global handle idx to
		 * notify any cross-core clients waiting on
		 * handle.
		 */
		g_status = synx_global_get_status(synx_obj->global_idx);
		if (g_status > SYNX_STATE_ACTIVE) {
			/* already signaled globally; propagate to local fence */
			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
				synx_obj->fence, g_status);
			synx_native_signal_fence(synx_obj, g_status);
		} else {
			/*
			 * re-check the status under the fence lock to close
			 * the race with a concurrent signal before handing
			 * the cb over to the global handle
			 */
			spin_lock_irqsave(synx_obj->fence->lock, flags);
			if (synx_util_get_object_status_locked(synx_obj) ==
				SYNX_STATE_ACTIVE) {
				signal_cb->synx_obj = NULL;
				synx_global_fetch_handle_details(synx_obj->global_idx, &h_synx);
				signal_cb->handle = h_synx;
				synx_obj->signal_cb = NULL;
				/*
				 * release reference held by signal cb and
				 * get reference on global index instead.
				 */
				synx_util_put_object(synx_obj);
				synx_global_get_ref(synx_obj->global_idx);
			}
			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
		}
	} else if (synx_obj->map_count == 0 && signal_cb &&
		(f_status == SYNX_STATE_ACTIVE)) {
		/*
		 * Local-handle case: try to remove the cb from the dma-fence;
		 * on success free it and drop the reference it held.
		 */
		if (dma_fence_remove_callback(synx_obj->fence,
			&signal_cb->fence_cb)) {
			kfree(signal_cb);
			synx_obj->signal_cb = NULL;
			/*
			 * release reference held by signal cb and
			 * get reference on global index instead.
			 */
			synx_util_put_object(synx_obj);
			/*
			 * NOTE(review): signal_cb field is already NULL here,
			 * so this log always prints a NULL pointer — confirm
			 * whether the freed cb address was intended instead
			 */
			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
				synx_obj->signal_cb);
		}
	}
	mutex_unlock(&synx_obj->obj_lock);
}
  865. static void synx_util_destroy_map_entry_worker(
  866. struct work_struct *dispatch)
  867. {
  868. struct synx_map_entry *map_entry =
  869. container_of(dispatch, struct synx_map_entry, dispatch);
  870. struct synx_coredata *synx_obj;
  871. synx_obj = map_entry->synx_obj;
  872. if (!IS_ERR_OR_NULL(synx_obj)) {
  873. synx_util_cleanup_fence(synx_obj);
  874. /* release reference held by map entry */
  875. synx_util_put_object(synx_obj);
  876. }
  877. if (!synx_util_is_global_handle(map_entry->key))
  878. clear_bit(synx_util_global_idx(map_entry->key),
  879. synx_dev->native->bitmap);
  880. dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
  881. map_entry->key, map_entry);
  882. kfree(map_entry);
  883. }
  884. void synx_util_destroy_map_entry(struct kref *kref)
  885. {
  886. struct synx_map_entry *map_entry =
  887. container_of(kref, struct synx_map_entry, refcount);
  888. hash_del(&map_entry->node);
  889. dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
  890. map_entry->key, map_entry);
  891. INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
  892. queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
  893. }
  894. void synx_util_release_map_entry(struct synx_map_entry *map_entry)
  895. {
  896. spinlock_t *lock;
  897. if (IS_ERR_OR_NULL(map_entry))
  898. return;
  899. if (synx_util_is_global_handle(map_entry->key))
  900. lock = &synx_dev->native->global_map_lock;
  901. else
  902. lock = &synx_dev->native->local_map_lock;
  903. spin_lock_bh(lock);
  904. kref_put(&map_entry->refcount,
  905. synx_util_destroy_map_entry);
  906. spin_unlock_bh(lock);
  907. }
  908. static void synx_util_destroy_handle_worker(
  909. struct work_struct *dispatch)
  910. {
  911. struct synx_handle_coredata *synx_data =
  912. container_of(dispatch, struct synx_handle_coredata,
  913. dispatch);
  914. synx_util_release_map_entry(synx_data->map_entry);
  915. dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
  916. synx_data->key, synx_data);
  917. kfree(synx_data);
  918. }
  919. void synx_util_destroy_handle(struct kref *kref)
  920. {
  921. struct synx_handle_coredata *synx_data =
  922. container_of(kref, struct synx_handle_coredata,
  923. refcount);
  924. hash_del(&synx_data->node);
  925. dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
  926. synx_data->client->id, synx_data->key, synx_data);
  927. INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
  928. queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
  929. }
  930. void synx_util_release_handle(struct synx_handle_coredata *synx_data)
  931. {
  932. struct synx_client *client;
  933. if (IS_ERR_OR_NULL(synx_data))
  934. return;
  935. client = synx_data->client;
  936. if (IS_ERR_OR_NULL(client))
  937. return;
  938. spin_lock_bh(&client->handle_map_lock);
  939. kref_put(&synx_data->refcount,
  940. synx_util_destroy_handle);
  941. spin_unlock_bh(&client->handle_map_lock);
  942. }
  943. struct bind_operations *synx_util_get_bind_ops(u32 type)
  944. {
  945. struct synx_registered_ops *client_ops;
  946. if (!synx_util_is_valid_bind_type(type))
  947. return NULL;
  948. mutex_lock(&synx_dev->vtbl_lock);
  949. client_ops = &synx_dev->bind_vtbl[type];
  950. if (!client_ops->valid) {
  951. mutex_unlock(&synx_dev->vtbl_lock);
  952. return NULL;
  953. }
  954. mutex_unlock(&synx_dev->vtbl_lock);
  955. return &client_ops->ops;
  956. }
  957. int synx_util_alloc_cb_entry(struct synx_client *client,
  958. struct synx_kernel_payload *data,
  959. u32 *cb_idx)
  960. {
  961. long idx;
  962. struct synx_client_cb *cb;
  963. if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
  964. IS_ERR_OR_NULL(cb_idx))
  965. return -SYNX_INVALID;
  966. idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
  967. if (idx >= SYNX_MAX_OBJS) {
  968. dprintk(SYNX_ERR,
  969. "[sess :%llu] free cb index not available\n",
  970. client->id);
  971. return -SYNX_NOMEM;
  972. }
  973. cb = &client->cb_table[idx];
  974. memset(cb, 0, sizeof(*cb));
  975. cb->is_valid = true;
  976. cb->client = client;
  977. cb->idx = idx;
  978. memcpy(&cb->kernel_cb, data,
  979. sizeof(cb->kernel_cb));
  980. *cb_idx = idx;
  981. dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
  982. client->id, *cb_idx);
  983. return 0;
  984. }
  985. int synx_util_clear_cb_entry(struct synx_client *client,
  986. struct synx_client_cb *cb)
  987. {
  988. int rc = 0;
  989. u32 idx;
  990. if (IS_ERR_OR_NULL(cb))
  991. return -SYNX_INVALID;
  992. idx = cb->idx;
  993. memset(cb, 0, sizeof(*cb));
  994. if (idx && idx < SYNX_MAX_OBJS) {
  995. clear_bit(idx, client->cb_bitmap);
  996. } else {
  997. dprintk(SYNX_ERR, "invalid index\n");
  998. rc = -SYNX_INVALID;
  999. }
  1000. return rc;
  1001. }
  1002. void synx_util_default_user_callback(u32 h_synx,
  1003. int status, void *data)
  1004. {
  1005. struct synx_client_cb *cb = data;
  1006. struct synx_client *client = NULL;
  1007. if (cb && cb->client) {
  1008. client = cb->client;
  1009. dprintk(SYNX_VERB,
  1010. "[sess :%llu] user cb queued for handle %d\n",
  1011. client->id, h_synx);
  1012. cb->kernel_cb.status = status;
  1013. mutex_lock(&client->event_q_lock);
  1014. list_add_tail(&cb->node, &client->event_q);
  1015. mutex_unlock(&client->event_q_lock);
  1016. wake_up_all(&client->event_wq);
  1017. } else {
  1018. dprintk(SYNX_ERR, "invalid params\n");
  1019. }
  1020. }
/*
 * synx_util_callback_dispatch - hand every callback registered on
 * @synx_obj to the callback workqueue with the final @status, removing
 * each from the registration list.
 *
 * NOTE(review): the list is walked and unlinked without taking a lock
 * here — presumably callers hold synx_obj->obj_lock; confirm at call
 * sites before reworking.
 */
void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
{
	struct synx_cb_data *synx_cb, *synx_cb_temp;

	if (IS_ERR_OR_NULL(synx_obj)) {
		dprintk(SYNX_ERR, "invalid arguments\n");
		return;
	}

	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		synx_cb->status = status;
		/* bounded waits armed a timer; cancel it before dispatch */
		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
			dprintk(SYNX_VERB,
				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
				synx_cb, synx_cb->timeout);
			del_timer(&synx_cb->synx_timer);
		}
		list_del_init(&synx_cb->node);
		/* synx_cb ownership passes to the cb workqueue handler */
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback\n");
	}
}
/*
 * synx_util_cb_dispatch - workqueue handler that delivers one queued
 * synx callback to its client.
 *
 * Owns and frees @synx_cb (the work item's container) on every path.
 * Kernel callbacks have their cb-table slot released here; the default
 * userspace callback instead receives the slot via payload.data and it
 * is cleared later (by the polling thread or at client destroy).
 */
void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
{
	struct synx_cb_data *synx_cb =
		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
	struct synx_client *client;
	struct synx_client_cb *cb;
	struct synx_kernel_payload payload;
	u32 status;

	/* takes a reference on the client; dropped at 'fail' */
	client = synx_get_client(synx_cb->session);
	if (IS_ERR_OR_NULL(client)) {
		dprintk(SYNX_ERR,
			"invalid session data %pK in cb payload\n",
			synx_cb->session);
		goto free;
	}

	/* slot 0 is reserved; see synx_util_clear_cb_entry */
	if (synx_cb->idx == 0 ||
		synx_cb->idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] invalid cb index %u\n",
			client->id, synx_cb->idx);
		goto fail;
	}

	status = synx_cb->status;
	cb = &client->cb_table[synx_cb->idx];
	if (!cb->is_valid) {
		dprintk(SYNX_ERR, "invalid cb payload\n");
		goto fail;
	}

	/* snapshot the payload before the slot may be cleared below */
	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
	payload.status = status;

	if (payload.cb_func == synx_util_default_user_callback) {
		/*
		 * need to send client cb data for default
		 * user cb (userspace cb)
		 */
		payload.data = cb;
	} else {
		/*
		 * clear the cb entry. userspace cb entry
		 * will be cleared after data read by the
		 * polling thread or when client is destroyed
		 */
		if (synx_util_clear_cb_entry(client, cb))
			dprintk(SYNX_ERR,
				"[sess :%llu] error clearing cb entry\n",
				client->id);
	}

	dprintk(SYNX_DBG,
		"callback dispatched for handle %u, status %u, data %pK\n",
		payload.h_synx, payload.status, payload.data);

	/* dispatch kernel callback */
	payload.cb_func(payload.h_synx,
		payload.status, payload.data);

fail:
	synx_put_client(client);
free:
	kfree(synx_cb);
}
  1101. int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences)
  1102. {
  1103. int rc = SYNX_SUCCESS;
  1104. int i = 0, handle_count = 0;
  1105. u32 h_child = 0;
  1106. struct dma_fence_array *array = NULL;
  1107. struct synx_coredata **synx_datas = NULL;
  1108. struct synx_map_entry *fence_entry = NULL;
  1109. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences))
  1110. return -SYNX_INVALID;
  1111. if (dma_fence_is_array(synx_obj->fence)) {
  1112. array = to_dma_fence_array(synx_obj->fence);
  1113. if (IS_ERR_OR_NULL(array))
  1114. return -SYNX_INVALID;
  1115. synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
  1116. if (IS_ERR_OR_NULL(synx_datas))
  1117. return -SYNX_NOMEM;
  1118. for (i = 0; i < array->num_fences; i++) {
  1119. h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
  1120. fence_entry = synx_util_get_map_entry(h_child);
  1121. if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj))
  1122. {
  1123. dprintk(SYNX_ERR, "Invalid handle access %u", h_child);
  1124. rc = -SYNX_NOENT;
  1125. goto fail;
  1126. }
  1127. synx_datas[handle_count++] = fence_entry->synx_obj;
  1128. synx_util_release_map_entry(fence_entry);
  1129. }
  1130. }
  1131. *child_synx_obj = synx_datas;
  1132. *num_fences = handle_count;
  1133. return rc;
  1134. fail:
  1135. kfree(synx_datas);
  1136. return rc;
  1137. }
  1138. u32 synx_util_get_fence_entry(u64 key, u32 global)
  1139. {
  1140. u32 h_synx = 0;
  1141. struct synx_fence_entry *curr;
  1142. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1143. hash_for_each_possible(synx_dev->native->fence_map,
  1144. curr, node, key) {
  1145. if (curr->key == key) {
  1146. if (global)
  1147. h_synx = curr->g_handle;
  1148. /* return local handle if global not available */
  1149. if (h_synx == 0)
  1150. h_synx = curr->l_handle;
  1151. break;
  1152. }
  1153. }
  1154. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1155. return h_synx;
  1156. }
  1157. void synx_util_release_fence_entry(u64 key)
  1158. {
  1159. struct synx_fence_entry *entry = NULL, *curr;
  1160. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1161. hash_for_each_possible(synx_dev->native->fence_map,
  1162. curr, node, key) {
  1163. if (curr->key == key) {
  1164. entry = curr;
  1165. break;
  1166. }
  1167. }
  1168. if (entry) {
  1169. hash_del(&entry->node);
  1170. dprintk(SYNX_MEM,
  1171. "released fence entry %pK for fence %pK\n",
  1172. entry, (void *)key);
  1173. kfree(entry);
  1174. }
  1175. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1176. }
  1177. int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
  1178. u32 *h_synx, u32 global)
  1179. {
  1180. int rc = SYNX_SUCCESS;
  1181. struct synx_fence_entry *curr;
  1182. if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
  1183. return -SYNX_INVALID;
  1184. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1185. hash_for_each_possible(synx_dev->native->fence_map,
  1186. curr, node, entry->key) {
  1187. /* raced with import from another process on same fence */
  1188. if (curr->key == entry->key) {
  1189. if (global)
  1190. *h_synx = curr->g_handle;
  1191. if (*h_synx == 0 || !global)
  1192. *h_synx = curr->l_handle;
  1193. rc = -SYNX_ALREADY;
  1194. break;
  1195. }
  1196. }
  1197. /* add entry only if its not present in the map */
  1198. if (rc == SYNX_SUCCESS) {
  1199. hash_add(synx_dev->native->fence_map,
  1200. &entry->node, entry->key);
  1201. dprintk(SYNX_MEM,
  1202. "added fence entry %pK for fence %pK\n",
  1203. entry, (void *)entry->key);
  1204. }
  1205. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1206. return rc;
  1207. }
  1208. struct synx_client *synx_get_client(struct synx_session *session)
  1209. {
  1210. struct synx_client *client = NULL;
  1211. struct synx_client *curr;
  1212. if (IS_ERR_OR_NULL(session))
  1213. return ERR_PTR(-SYNX_INVALID);
  1214. spin_lock_bh(&synx_dev->native->metadata_map_lock);
  1215. hash_for_each_possible(synx_dev->native->client_metadata_map,
  1216. curr, node, (u64)session) {
  1217. if (curr == (struct synx_client *)session) {
  1218. if (curr->active) {
  1219. kref_get(&curr->refcount);
  1220. client = curr;
  1221. }
  1222. break;
  1223. }
  1224. }
  1225. spin_unlock_bh(&synx_dev->native->metadata_map_lock);
  1226. return client;
  1227. }
/*
 * synx_client_cleanup - deferred teardown of a destroyed session.
 *
 * Runs on the cleanup workqueue after synx_client_destroy() unlinked
 * the client from the metadata map; releases every handle the session
 * left behind, then frees the client.
 */
static void synx_client_cleanup(struct work_struct *dispatch)
{
	int i, j;
	struct synx_client *client =
		container_of(dispatch, struct synx_client, dispatch);
	struct synx_handle_coredata *curr;
	struct hlist_node *tmp;

	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
		client->id, client->name);
	/*
	 * go over all the remaining synx obj handles
	 * un-released from this session and remove them.
	 */
	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
		dprintk(SYNX_WARN,
			"[sess :%llu] un-released handle %u\n",
			client->id, curr->key);
		/*
		 * drain every outstanding reference so the handle is
		 * destroyed even though its holders never released it
		 */
		j = kref_read(&curr->refcount);
		/* release pending reference */
		while (j--)
			kref_put(&curr->refcount, synx_util_destroy_handle);
	}
	mutex_destroy(&client->event_q_lock);
	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
		client->id, client->name, client);
	vfree(client);
}
  1255. void synx_client_destroy(struct kref *kref)
  1256. {
  1257. struct synx_client *client =
  1258. container_of(kref, struct synx_client, refcount);
  1259. hash_del(&client->node);
  1260. INIT_WORK(&client->dispatch, synx_client_cleanup);
  1261. queue_work(synx_dev->wq_cleanup, &client->dispatch);
  1262. }
  1263. void synx_put_client(struct synx_client *client)
  1264. {
  1265. if (IS_ERR_OR_NULL(client))
  1266. return;
  1267. spin_lock_bh(&synx_dev->native->metadata_map_lock);
  1268. kref_put(&client->refcount, synx_client_destroy);
  1269. spin_unlock_bh(&synx_dev->native->metadata_map_lock);
  1270. }
  1271. void synx_util_generate_timestamp(char *timestamp, size_t size)
  1272. {
  1273. struct timespec64 tv;
  1274. struct tm tm;
  1275. ktime_get_real_ts64(&tv);
  1276. time64_to_tm(tv.tv_sec, 0, &tm);
  1277. snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
  1278. tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
  1279. tm.tm_min, tm.tm_sec);
  1280. }
  1281. void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
  1282. {
  1283. struct error_node *err_node;
  1284. if (!synx_dev->debugfs_root)
  1285. return;
  1286. err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
  1287. if (!err_node)
  1288. return;
  1289. err_node->client_id = client_id;
  1290. err_node->error_code = err;
  1291. err_node->h_synx = h_synx;
  1292. synx_util_generate_timestamp(err_node->timestamp,
  1293. sizeof(err_node->timestamp));
  1294. mutex_lock(&synx_dev->error_lock);
  1295. list_add(&err_node->node,
  1296. &synx_dev->error_list);
  1297. mutex_unlock(&synx_dev->error_lock);
  1298. }
  1299. int synx_util_save_data(void *fence, u32 flags,
  1300. u32 h_synx)
  1301. {
  1302. int rc = SYNX_SUCCESS;
  1303. struct synx_entry_64 *entry, *curr;
  1304. u64 key;
  1305. u32 tbl = synx_util_map_params_to_type(flags);
  1306. switch (tbl) {
  1307. case SYNX_TYPE_CSL:
  1308. key = *(u32 *)fence;
  1309. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1310. /* ensure fence is not already added to map */
  1311. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1312. curr, node, key) {
  1313. if (curr->key == key) {
  1314. rc = -SYNX_ALREADY;
  1315. break;
  1316. }
  1317. }
  1318. if (rc == SYNX_SUCCESS) {
  1319. entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
  1320. if (entry) {
  1321. entry->data[0] = h_synx;
  1322. entry->key = key;
  1323. kref_init(&entry->refcount);
  1324. hash_add(synx_dev->native->csl_fence_map,
  1325. &entry->node, entry->key);
  1326. dprintk(SYNX_MEM, "added csl fence %d to map %pK\n",
  1327. entry->key, entry);
  1328. } else {
  1329. rc = -SYNX_NOMEM;
  1330. }
  1331. }
  1332. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1333. break;
  1334. default:
  1335. dprintk(SYNX_ERR, "invalid hash table selection\n");
  1336. kfree(entry);
  1337. rc = -SYNX_INVALID;
  1338. }
  1339. return rc;
  1340. }
  1341. struct synx_entry_64 *synx_util_retrieve_data(void *fence,
  1342. u32 type)
  1343. {
  1344. u64 key;
  1345. struct synx_entry_64 *entry = NULL;
  1346. struct synx_entry_64 *curr;
  1347. switch (type) {
  1348. case SYNX_TYPE_CSL:
  1349. key = *(u32 *)fence;
  1350. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1351. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1352. curr, node, key) {
  1353. if (curr->key == key) {
  1354. kref_get(&curr->refcount);
  1355. entry = curr;
  1356. break;
  1357. }
  1358. }
  1359. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1360. break;
  1361. default:
  1362. dprintk(SYNX_ERR, "invalid hash table selection %u\n",
  1363. type);
  1364. }
  1365. return entry;
  1366. }
  1367. void synx_util_destroy_data(struct kref *kref)
  1368. {
  1369. struct synx_entry_64 *entry =
  1370. container_of(kref, struct synx_entry_64, refcount);
  1371. hash_del(&entry->node);
  1372. dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
  1373. entry->key, entry);
  1374. kfree(entry);
  1375. }
  1376. void synx_util_remove_data(void *fence,
  1377. u32 type)
  1378. {
  1379. u64 key;
  1380. struct synx_entry_64 *entry = NULL;
  1381. struct synx_entry_64 *curr;
  1382. if (IS_ERR_OR_NULL(fence))
  1383. return;
  1384. switch (type) {
  1385. case SYNX_TYPE_CSL:
  1386. key = *((u32 *)fence);
  1387. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1388. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1389. curr, node, key) {
  1390. if (curr->key == key) {
  1391. entry = curr;
  1392. break;
  1393. }
  1394. }
  1395. if (entry)
  1396. kref_put(&entry->refcount, synx_util_destroy_data);
  1397. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1398. break;
  1399. default:
  1400. dprintk(SYNX_ERR, "invalid hash table selection %u\n",
  1401. type);
  1402. }
  1403. }
  1404. void synx_util_map_import_params_to_create(
  1405. struct synx_import_indv_params *params,
  1406. struct synx_create_params *c_params)
  1407. {
  1408. if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
  1409. return;
  1410. if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
  1411. c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;
  1412. if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
  1413. c_params->flags |= SYNX_CREATE_LOCAL_FENCE;
  1414. if (params->flags & SYNX_IMPORT_DMA_FENCE)
  1415. c_params->flags |= SYNX_CREATE_DMA_FENCE;
  1416. }
  1417. u32 synx_util_map_client_id_to_core(
  1418. enum synx_client_id id)
  1419. {
  1420. u32 core_id;
  1421. switch (id) {
  1422. case SYNX_CLIENT_NATIVE:
  1423. core_id = SYNX_CORE_APSS; break;
  1424. case SYNX_CLIENT_ICP_CTX0:
  1425. core_id = SYNX_CORE_ICP; break;
  1426. case SYNX_CLIENT_EVA_CTX0:
  1427. core_id = SYNX_CORE_EVA; break;
  1428. case SYNX_CLIENT_VID_CTX0:
  1429. core_id = SYNX_CORE_IRIS; break;
  1430. case SYNX_CLIENT_NSP_CTX0:
  1431. core_id = SYNX_CORE_NSP; break;
  1432. default:
  1433. core_id = SYNX_CORE_MAX;
  1434. }
  1435. return core_id;
  1436. }