synx_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>

#include "synx_debugfs.h"
#include "synx_util.h"

extern void synx_external_callback(s32 sync_obj, int status, void *data);

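/*
 * Summary of the function below: set up the coredata backing a newly
 * created synx object. It takes a reference on an existing global handle
 * or allocates a fresh global/local handle, then attaches either the
 * caller-supplied dma-fence (SYNX_CREATE_DMA_FENCE) or a newly allocated
 * fence, and records the fence -> handle mapping. On failure the handle
 * allocation is unwound through the clean/free labels.
 */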
int synx_util_init_coredata(struct synx_coredata *synx_obj,
	struct synx_create_params *params,
	struct dma_fence_ops *ops,
	u64 dma_context)
{
	int rc = -SYNX_INVALID;
	spinlock_t *fence_lock;
	struct dma_fence *fence;
	struct synx_fence_entry *entry;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
		IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
		return -SYNX_INVALID;

	if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
		*params->h_synx != 0) {
		rc = synx_global_get_ref(
			synx_util_global_idx(*params->h_synx));
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_synx);
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else {
		rc = synx_alloc_local_handle(params->h_synx);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	synx_obj->map_count = 1;
	synx_obj->num_bound_synxs = 0;
	synx_obj->type |= params->flags;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	if (params->name)
		strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));

	if (params->flags & SYNX_CREATE_DMA_FENCE) {
		fence = params->fence;
		if (IS_ERR_OR_NULL(fence)) {
			dprintk(SYNX_ERR, "invalid external fence\n");
			goto free;
		}

		dma_fence_get(fence);
		synx_obj->fence = fence;
	} else {
		/*
		 * lock and fence memory will be released in fence
		 * release function
		 */
		fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence_lock)) {
			rc = -SYNX_NOMEM;
			goto free;
		}

		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence)) {
			kfree(fence_lock);
			rc = -SYNX_NOMEM;
			goto free;
		}

		spin_lock_init(fence_lock);
		dma_fence_init(fence, ops, fence_lock, dma_context, 1);

		synx_obj->fence = fence;
		synx_util_activate(synx_obj);
		dprintk(SYNX_MEM,
			"allocated backing fence %pK\n", fence);

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (IS_ERR_OR_NULL(entry)) {
			rc = -SYNX_NOMEM;
			goto clean;
		}

		entry->key = (u64)fence;
		if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
			entry->g_handle = *params->h_synx;
		else
			entry->l_handle = *params->h_synx;

		rc = synx_util_insert_fence_entry(entry,
			params->h_synx,
			params->flags & SYNX_CREATE_GLOBAL_FENCE);
		BUG_ON(rc != SYNX_SUCCESS);
	}

	if (rc != SYNX_SUCCESS)
		goto clean;

	return SYNX_SUCCESS;

clean:
	dma_fence_put(fence);
free:
	if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
		synx_global_put_ref(
			synx_util_global_idx(*params->h_synx));
	else
		clear_bit(synx_util_global_idx(*params->h_synx),
			synx_dev->native->bitmap);

	return rc;
}

int synx_util_add_callback(struct synx_coredata *synx_obj,
	u32 h_synx)
{
	int rc;
	struct synx_signal_cb *signal_cb;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
	if (IS_ERR_OR_NULL(signal_cb))
		return -SYNX_NOMEM;

	signal_cb->handle = h_synx;
	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
	signal_cb->synx_obj = synx_obj;

	/* get reference on synx coredata for signal cb */
	synx_util_get_object(synx_obj);

	/*
	 * adding callback enables synx framework to
	 * get notified on signal from clients using
	 * native dma fence operations.
	 */
	rc = dma_fence_add_callback(synx_obj->fence,
		&signal_cb->fence_cb, synx_fence_callback);
	if (rc != 0) {
		if (rc == -ENOENT) {
			if (synx_util_is_global_object(synx_obj)) {
				/* signal (if) global handle */
				rc = synx_global_update_status(
					synx_obj->global_idx,
					synx_util_get_object_status(synx_obj));
				if (rc != SYNX_SUCCESS)
					dprintk(SYNX_ERR,
						"status update of %u with fence %pK\n",
						synx_obj->global_idx, synx_obj->fence);
			} else {
				rc = SYNX_SUCCESS;
			}
		} else {
			dprintk(SYNX_ERR,
				"error adding callback for %pK err %d\n",
				synx_obj->fence, rc);
		}

		synx_util_put_object(synx_obj);
		kfree(signal_cb);
		return rc;
	}

	synx_obj->signal_cb = signal_cb;
	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
		signal_cb, synx_obj->fence);

	return SYNX_SUCCESS;
}

int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	struct synx_merge_params *params,
	u32 num_objs,
	u64 dma_context)
{
	int rc;
	struct dma_fence_array *array;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_merged_obj);
		synx_obj->global_idx =
			synx_util_global_idx(*params->h_merged_obj);
	} else {
		rc = synx_alloc_local_handle(params->h_merged_obj);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	array = dma_fence_array_create(num_objs, fences,
		dma_context, 1, false);
	if (IS_ERR_OR_NULL(array))
		return -SYNX_INVALID;

	synx_obj->fence = &array->base;
	synx_obj->map_count = 1;
	synx_obj->type = params->flags;
	synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
	synx_obj->num_bound_synxs = 0;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	synx_util_activate(synx_obj);

	return rc;
}

static void synx_util_destroy_coredata(struct kref *kref)
{
	int rc;
	struct synx_coredata *synx_obj =
		container_of(kref, struct synx_coredata, refcount);

	if (synx_util_is_global_object(synx_obj)) {
		rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
		if (rc)
			dprintk(SYNX_ERR, "Failed to clear subscribers\n");
		synx_global_put_ref(synx_obj->global_idx);
	}

	synx_util_object_destroy(synx_obj);
}

void synx_util_get_object(struct synx_coredata *synx_obj)
{
	kref_get(&synx_obj->refcount);
}

void synx_util_put_object(struct synx_coredata *synx_obj)
{
	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
}

void synx_util_object_destroy(struct synx_coredata *synx_obj)
{
	int rc;
	u32 i;
	s32 sync_id;
	u32 type;
	unsigned long flags;
	struct synx_cb_data *synx_cb, *synx_cb_temp;
	struct synx_bind_desc *bind_desc;
	struct bind_operations *bind_ops;
	struct synx_external_data *data;

	/* clear all the undispatched callbacks */
	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		dprintk(SYNX_ERR,
			"dispatching un-released callbacks of session %pK\n",
			synx_cb->session);
		synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
		list_del_init(&synx_cb->node);
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback for fence %pK\n", synx_obj->fence);
	}

	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
		bind_desc = &synx_obj->bound_synxs[i];
		sync_id = bind_desc->external_desc.id;
		type = bind_desc->external_desc.type;
		data = bind_desc->external_data;
		bind_ops = synx_util_get_bind_ops(type);
		if (IS_ERR_OR_NULL(bind_ops)) {
			dprintk(SYNX_ERR,
				"bind ops fail id: %d, type: %u\n",
				sync_id, type);
			continue;
		}

		/* clear the hash table entry */
		synx_util_remove_data(&sync_id, type);

		rc = bind_ops->deregister_callback(
			synx_external_callback, data, sync_id);
		if (rc < 0) {
			dprintk(SYNX_ERR,
				"de-registration fail id: %d, type: %u, err: %d\n",
				sync_id, type, rc);
			continue;
		}

		/*
		 * release the memory allocated for external data.
		 * It is safe to release this memory
		 * only if deregistration is successful.
		 */
		kfree(data);
	}

	mutex_destroy(&synx_obj->obj_lock);
	synx_util_release_fence_entry((u64)synx_obj->fence);

	/*
	 * dma fence framework expects handles to be signaled before release,
	 * so signal if the handle is active and holds the last refcount.
	 * Synx handles on other cores are still active to carry out the
	 * usual callflow.
	 */
	if (!IS_ERR_OR_NULL(synx_obj->fence)) {
		spin_lock_irqsave(synx_obj->fence->lock, flags);
		if (kref_read(&synx_obj->fence->refcount) == 1 &&
			(synx_util_get_object_status_locked(synx_obj) ==
			SYNX_STATE_ACTIVE)) {
			/* set fence error to cancel */
			dma_fence_set_error(synx_obj->fence,
				-SYNX_STATE_SIGNALED_CANCEL);
			rc = dma_fence_signal_locked(synx_obj->fence);
			if (rc)
				dprintk(SYNX_ERR,
					"signaling fence %pK failed=%d\n",
					synx_obj->fence, rc);
		}
		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
	}

	dma_fence_put(synx_obj->fence);
	kfree(synx_obj);
	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
}

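/*
 * Summary of the function below: find a free index in the handle bitmap.
 * find_first_zero_bit() is not atomic with respect to concurrent
 * allocators, so the bit is claimed with test_and_set_bit() and the scan
 * retried if another caller won the race. A return value >= size means
 * the bitmap is exhausted.
 */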
long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
{
	bool bit;
	long idx;

	do {
		idx = find_first_zero_bit(bitmap, size);
		if (idx >= size)
			break;
		bit = test_and_set_bit(idx, bitmap);
	} while (bit);

	return idx;
}

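/*
 * Summary of the function below: pack a handle from its components.
 * Rough layout, with field widths taken from SYNX_HANDLE_CORE_BITS and
 * SYNX_HANDLE_INDEX_BITS (defined elsewhere):
 *
 *   [ global flag ][ core_id : SYNX_HANDLE_CORE_BITS ][ idx : SYNX_HANDLE_INDEX_BITS ]
 *
 * A return value of 0 indicates an invalid index.
 */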
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
{
	u32 handle = 0;

	if (idx >= SYNX_MAX_OBJS)
		return 0;

	if (global_idx) {
		handle = 1;
		handle <<= SYNX_HANDLE_CORE_BITS;
	}
	handle |= core_id;
	handle <<= SYNX_HANDLE_INDEX_BITS;
	handle |= idx;

	return handle;
}

int synx_alloc_global_handle(u32 *new_synx)
{
	int rc;
	u32 idx;

	rc = synx_global_alloc_index(&idx);
	if (rc != SYNX_SUCCESS)
		return rc;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
	dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
		*new_synx, *new_synx);

	rc = synx_global_init_coredata(*new_synx);
	return rc;
}

int synx_alloc_local_handle(u32 *new_synx)
{
	u32 idx;

	idx = synx_util_get_free_handle(synx_dev->native->bitmap,
		SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS)
		return -SYNX_NOMEM;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
	dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
		*new_synx, *new_synx);

	return SYNX_SUCCESS;
}

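/*
 * Summary of the function below: insert a per-client handle entry into
 * client->handle_map. If the handle is already present and maps to the
 * same coredata, the existing entry is reused (refcount and rel_count
 * bumped) and the freshly allocated entry is dropped; a key collision
 * with a different coredata is reported as inconsistent map data.
 */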
int synx_util_init_handle(struct synx_client *client,
	struct synx_coredata *synx_obj, u32 *new_h_synx,
	void *map_entry)
{
	int rc = SYNX_SUCCESS;
	bool found = false;
	struct synx_handle_coredata *synx_data, *curr;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
		IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
		return -SYNX_INVALID;

	synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
	if (IS_ERR_OR_NULL(synx_data))
		return -SYNX_NOMEM;

	synx_data->client = client;
	synx_data->synx_obj = synx_obj;
	synx_data->key = *new_h_synx;
	synx_data->map_entry = map_entry;
	kref_init(&synx_data->refcount);
	synx_data->rel_count = 1;

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		curr, node, *new_h_synx) {
		if (curr->key == *new_h_synx) {
			if (curr->synx_obj != synx_obj) {
				rc = -SYNX_INVALID;
				dprintk(SYNX_ERR,
					"inconsistent data in handle map\n");
			} else {
				kref_get(&curr->refcount);
				curr->rel_count++;
			}
			found = true;
			break;
		}
	}
	if (unlikely(found))
		kfree(synx_data);
	else
		hash_add(client->handle_map,
			&synx_data->node, *new_h_synx);
	spin_unlock_bh(&client->handle_map_lock);

	return rc;
}

int synx_util_activate(struct synx_coredata *synx_obj)
{
	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	/* move synx to ACTIVE state and register cb for merged object */
	dma_fence_enable_sw_signaling(synx_obj->fence);
	return 0;
}

static u32 synx_util_get_references(struct synx_coredata *synx_obj)
{
	u32 count = 0;
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	/* obtain dma fence reference */
	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_get(array->fences[i]);
		count = array->num_fences;
	} else {
		dma_fence_get(synx_obj->fence);
		count = 1;
	}

	return count;
}

static void synx_util_put_references(struct synx_coredata *synx_obj)
{
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_put(array->fences[i]);
	} else {
		dma_fence_put(synx_obj->fence);
	}
}

static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	u32 idx)
{
	struct dma_fence_array *array = NULL;
	u32 i = 0;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			fences[idx+i] = array->fences[i];

		return array->num_fences;
	}

	fences[idx] = synx_obj->fence;
	return 1;
}

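/*
 * Summary of the function below: compact the fence array in place,
 * dropping repeated pointers. Each duplicate also releases the extra
 * dma-fence reference taken while the merge list was being built.
 * Returns the number of unique fences left at the front of the array.
 */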
static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
{
	int i, j;
	u32 wr_idx = 1;

	if (IS_ERR_OR_NULL(arr)) {
		dprintk(SYNX_ERR, "invalid input array\n");
		return 0;
	}

	for (i = 1; i < num; i++) {
		for (j = 0; j < wr_idx; j++) {
			if (arr[i] == arr[j]) {
				/* release reference obtained for duplicate */
				dprintk(SYNX_DBG,
					"releasing duplicate reference\n");
				dma_fence_put(arr[i]);
				break;
			}
		}

		if (j == wr_idx)
			arr[wr_idx++] = arr[i];
	}

	return wr_idx;
}

s32 synx_util_merge_error(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs)
{
	u32 i = 0;
	struct synx_handle_coredata *synx_data;
	struct synx_coredata *synx_obj;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
		return -SYNX_INVALID;

	for (i = 0; i < num_objs; i++) {
		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
		synx_obj = synx_util_obtain_object(synx_data);
		if (IS_ERR_OR_NULL(synx_obj) ||
			IS_ERR_OR_NULL(synx_obj->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in cleanup\n",
				client->id, h_synxs[i]);
			continue;
		}

		/* release all references obtained during merge validation */
		synx_util_put_references(synx_obj);
		synx_util_release_handle(synx_data);
	}

	return 0;
}

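/*
 * Summary of the function below: validate a merge request. It acquires
 * every input handle, counts the fences they contribute (fence arrays
 * are flattened), collects them into a newly allocated list for the
 * caller, and de-duplicates the result. On error the references and
 * handles acquired so far are released and *fence_cnt is reset to 0.
 */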
int synx_util_validate_merge(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs,
	struct dma_fence ***fence_list,
	u32 *fence_cnt)
{
	u32 count = 0;
	u32 i = 0;
	struct synx_handle_coredata **synx_datas;
	struct synx_coredata **synx_objs;
	struct dma_fence **fences = NULL;

	if (num_objs <= 1) {
		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
		return -SYNX_INVALID;
	}

	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_datas))
		return -SYNX_NOMEM;

	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_objs)) {
		kfree(synx_datas);
		return -SYNX_NOMEM;
	}

	for (i = 0; i < num_objs; i++) {
		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
		if (IS_ERR_OR_NULL(synx_objs[i]) ||
			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in merge list\n",
				client->id, h_synxs[i]);
			*fence_cnt = i;
			goto error;
		}
		count += synx_util_get_references(synx_objs[i]);
	}

	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
	if (IS_ERR_OR_NULL(fences)) {
		*fence_cnt = num_objs;
		goto error;
	}

	/* memory will be released later in the invoking function */
	*fence_list = fences;
	count = 0;

	for (i = 0; i < num_objs; i++) {
		count += synx_util_add_fence(synx_objs[i], fences, count);
		/* release the reference obtained earlier in the function */
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = synx_util_remove_duplicates(fences, count);
	kfree(synx_objs);
	kfree(synx_datas);
	return 0;

error:
	/* release the reference/s obtained earlier in the function */
	for (i = 0; i < *fence_cnt; i++) {
		synx_util_put_references(synx_objs[i]);
		synx_util_release_handle(synx_datas[i]);
	}
	*fence_cnt = 0;
	kfree(synx_objs);
	kfree(synx_datas);
	return -SYNX_INVALID;
}

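/*
 * Summary of the function below: map a dma-fence status onto a synx
 * state. 0 (pending) becomes SYNX_STATE_ACTIVE, 1 (signaled) becomes
 * SYNX_STATE_SIGNALED_SUCCESS, and negative error codes are translated
 * either to their matching SYNX_STATE_SIGNALED_* value or, by default,
 * to the negated error code.
 */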
static u32 __fence_state(struct dma_fence *fence, bool locked)
{
	s32 status;
	u32 state = SYNX_STATE_INVALID;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	if (locked)
		status = dma_fence_get_status_locked(fence);
	else
		status = dma_fence_get_status(fence);

	/* convert fence status to synx state */
	switch (status) {
	case 0:
		state = SYNX_STATE_ACTIVE;
		break;
	case 1:
		state = SYNX_STATE_SIGNALED_SUCCESS;
		break;
	case -SYNX_STATE_SIGNALED_CANCEL:
		state = SYNX_STATE_SIGNALED_CANCEL;
		break;
	case -SYNX_STATE_SIGNALED_EXTERNAL:
		state = SYNX_STATE_SIGNALED_EXTERNAL;
		break;
	case -SYNX_STATE_SIGNALED_ERROR:
		state = SYNX_STATE_SIGNALED_ERROR;
		break;
	default:
		state = (u32)(-status);
	}

	return state;
}

static u32 __fence_group_state(struct dma_fence *fence, bool locked)
{
	u32 i = 0;
	u32 state = SYNX_STATE_INVALID;
	struct dma_fence_array *array = NULL;
	u32 intr, actv_cnt, sig_cnt, err_cnt;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	actv_cnt = sig_cnt = err_cnt = 0;
	array = to_dma_fence_array(fence);
	if (IS_ERR_OR_NULL(array))
		return SYNX_STATE_INVALID;

	for (i = 0; i < array->num_fences; i++) {
		intr = __fence_state(array->fences[i], locked);
		switch (intr) {
		case SYNX_STATE_ACTIVE:
			actv_cnt++;
			break;
		case SYNX_STATE_SIGNALED_SUCCESS:
			sig_cnt++;
			break;
		default:
			err_cnt++;
		}
	}

	dprintk(SYNX_DBG,
		"group cnt stats act:%u, sig: %u, err: %u\n",
		actv_cnt, sig_cnt, err_cnt);

	if (err_cnt)
		state = SYNX_STATE_SIGNALED_ERROR;
	else if (actv_cnt)
		state = SYNX_STATE_ACTIVE;
	else if (sig_cnt == array->num_fences)
		state = SYNX_STATE_SIGNALED_SUCCESS;

	return state;
}

/*
 * WARN: Should not hold the fence spinlock when invoking
 * this function. Use synx_util_get_object_status_locked instead.
 */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, false);
	else
		state = __fence_state(synx_obj->fence, false);

	return state;
}

/* use this for status check when holding on to metadata spinlock */
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, true);
	else
		state = __fence_state(synx_obj->fence, true);

	return state;
}

struct synx_handle_coredata *synx_util_acquire_handle(
	struct synx_client *client, u32 h_synx)
{
	struct synx_handle_coredata *synx_data = NULL;
	struct synx_handle_coredata *synx_handle =
		ERR_PTR(-SYNX_NOENT);

	if (IS_ERR_OR_NULL(client))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		synx_data, node, h_synx) {
		if (synx_data->key == h_synx &&
			synx_data->rel_count != 0) {
			kref_get(&synx_data->refcount);
			synx_handle = synx_data;
			break;
		}
	}
	spin_unlock_bh(&client->handle_map_lock);

	return synx_handle;
}

struct synx_map_entry *synx_util_insert_to_map(
	struct synx_coredata *synx_obj,
	u32 h_synx, u32 flags)
{
	struct synx_map_entry *map_entry;

	map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
	if (IS_ERR_OR_NULL(map_entry))
		return ERR_PTR(-SYNX_NOMEM);

	kref_init(&map_entry->refcount);
	map_entry->synx_obj = synx_obj;
	map_entry->flags = flags;
	map_entry->key = h_synx;

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_add(synx_dev->native->global_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->global_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to global map %pK\n",
			h_synx, map_entry);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_add(synx_dev->native->local_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->local_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to local map %pK\n",
			h_synx, map_entry);
	}

	return map_entry;
}

struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
{
	struct synx_map_entry *curr;
	struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);

	if (h_synx == 0)
		return ERR_PTR(-SYNX_INVALID);

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_for_each_possible(synx_dev->native->global_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->global_map_lock);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_for_each_possible(synx_dev->native->local_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->local_map_lock);
	}

	/* should we allocate if entry not found? */
	return map_entry;
}

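/*
 * Summary of the function below: called when the last map entry for a
 * handle goes away. If the fence is still active and has a registered
 * signal callback, responsibility is handed over: for global handles,
 * either signal the local fence with the status already recorded in the
 * global table, or detach the callback from the coredata and pin the
 * global index instead; for purely local handles, remove the dma-fence
 * callback and free it.
 */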
static void synx_util_cleanup_fence(
	struct synx_coredata *synx_obj)
{
	struct synx_signal_cb *signal_cb;
	unsigned long flags;
	u32 g_status;
	u32 f_status;

	mutex_lock(&synx_obj->obj_lock);
	synx_obj->map_count--;
	signal_cb = synx_obj->signal_cb;
	f_status = synx_util_get_object_status(synx_obj);
	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);

	if (synx_obj->map_count == 0 &&
		(signal_cb != NULL) &&
		(synx_obj->global_idx != 0) &&
		(f_status == SYNX_STATE_ACTIVE)) {
		/*
		 * no more clients interested for notification
		 * on handle on local core.
		 * remove reference held by callback on synx
		 * coredata structure and update cb (if still
		 * un-signaled) with global handle idx to
		 * notify any cross-core clients waiting on
		 * handle.
		 */
		g_status = synx_global_get_status(synx_obj->global_idx);
		if (g_status > SYNX_STATE_ACTIVE) {
			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
				synx_obj->fence, g_status);
			synx_native_signal_fence(synx_obj, g_status);
		} else {
			spin_lock_irqsave(synx_obj->fence->lock, flags);
			if (synx_util_get_object_status_locked(synx_obj) ==
				SYNX_STATE_ACTIVE) {
				signal_cb->synx_obj = NULL;
				synx_obj->signal_cb = NULL;
				/*
				 * release reference held by signal cb and
				 * get reference on global index instead.
				 */
				synx_util_put_object(synx_obj);
				synx_global_get_ref(synx_obj->global_idx);
			}
			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
		}
	} else if (synx_obj->map_count == 0 && signal_cb &&
		(f_status == SYNX_STATE_ACTIVE)) {
		if (dma_fence_remove_callback(synx_obj->fence,
			&signal_cb->fence_cb)) {
			kfree(signal_cb);
			synx_obj->signal_cb = NULL;
			/*
			 * release reference held by signal cb and
			 * get reference on global index instead.
			 */
			synx_util_put_object(synx_obj);
			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
				synx_obj->signal_cb);
		}
	}
	mutex_unlock(&synx_obj->obj_lock);
}

static void synx_util_destroy_map_entry_worker(
	struct work_struct *dispatch)
{
	struct synx_map_entry *map_entry =
		container_of(dispatch, struct synx_map_entry, dispatch);
	struct synx_coredata *synx_obj;

	synx_obj = map_entry->synx_obj;
	if (!IS_ERR_OR_NULL(synx_obj)) {
		synx_util_cleanup_fence(synx_obj);
		/* release reference held by map entry */
		synx_util_put_object(synx_obj);
	}

	if (!synx_util_is_global_handle(map_entry->key))
		clear_bit(synx_util_global_idx(map_entry->key),
			synx_dev->native->bitmap);
	dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
		map_entry->key, map_entry);
	kfree(map_entry);
}

static void synx_util_destroy_map_entry(struct kref *kref)
{
	struct synx_map_entry *map_entry =
		container_of(kref, struct synx_map_entry, refcount);

	hash_del(&map_entry->node);
	dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
		map_entry->key, map_entry);
	INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
	queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
}

void synx_util_release_map_entry(struct synx_map_entry *map_entry)
{
	spinlock_t *lock;

	if (IS_ERR_OR_NULL(map_entry))
		return;

	if (synx_util_is_global_handle(map_entry->key))
		lock = &synx_dev->native->global_map_lock;
	else
		lock = &synx_dev->native->local_map_lock;

	spin_lock_bh(lock);
	kref_put(&map_entry->refcount,
		synx_util_destroy_map_entry);
	spin_unlock_bh(lock);
}

static void synx_util_destroy_handle_worker(
	struct work_struct *dispatch)
{
	struct synx_handle_coredata *synx_data =
		container_of(dispatch, struct synx_handle_coredata,
		dispatch);

	synx_util_release_map_entry(synx_data->map_entry);
	dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
		synx_data->key, synx_data);
	kfree(synx_data);
}

static void synx_util_destroy_handle(struct kref *kref)
{
	struct synx_handle_coredata *synx_data =
		container_of(kref, struct synx_handle_coredata,
		refcount);

	hash_del(&synx_data->node);
	dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
		synx_data->client->id, synx_data->key, synx_data);
	INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
	queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
}

void synx_util_release_handle(struct synx_handle_coredata *synx_data)
{
	struct synx_client *client;

	if (IS_ERR_OR_NULL(synx_data))
		return;

	client = synx_data->client;
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&client->handle_map_lock);
	kref_put(&synx_data->refcount,
		synx_util_destroy_handle);
	spin_unlock_bh(&client->handle_map_lock);
}

struct bind_operations *synx_util_get_bind_ops(u32 type)
{
	struct synx_registered_ops *client_ops;

	if (!synx_util_is_valid_bind_type(type))
		return NULL;

	mutex_lock(&synx_dev->vtbl_lock);
	client_ops = &synx_dev->bind_vtbl[type];
	if (!client_ops->valid) {
		mutex_unlock(&synx_dev->vtbl_lock);
		return NULL;
	}
	mutex_unlock(&synx_dev->vtbl_lock);

	return &client_ops->ops;
}

int synx_util_alloc_cb_entry(struct synx_client *client,
	struct synx_kernel_payload *data,
	u32 *cb_idx)
{
	long idx;
	struct synx_client_cb *cb;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
		IS_ERR_OR_NULL(cb_idx))
		return -SYNX_INVALID;

	idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] free cb index not available\n",
			client->id);
		return -SYNX_NOMEM;
	}

	cb = &client->cb_table[idx];
	memset(cb, 0, sizeof(*cb));
	cb->is_valid = true;
	cb->client = client;
	cb->idx = idx;
	memcpy(&cb->kernel_cb, data,
		sizeof(cb->kernel_cb));

	*cb_idx = idx;
	dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
		client->id, *cb_idx);
	return 0;
}

int synx_util_clear_cb_entry(struct synx_client *client,
	struct synx_client_cb *cb)
{
	int rc = 0;
	u32 idx;

	if (IS_ERR_OR_NULL(cb))
		return -SYNX_INVALID;

	idx = cb->idx;
	memset(cb, 0, sizeof(*cb));
	if (idx && idx < SYNX_MAX_OBJS) {
		clear_bit(idx, client->cb_bitmap);
	} else {
		dprintk(SYNX_ERR, "invalid index\n");
		rc = -SYNX_INVALID;
	}

	return rc;
}

void synx_util_default_user_callback(u32 h_synx,
	int status, void *data)
{
	struct synx_client_cb *cb = data;
	struct synx_client *client = NULL;

	if (cb && cb->client) {
		client = cb->client;
		dprintk(SYNX_VERB,
			"[sess :%llu] user cb queued for handle %d\n",
			client->id, h_synx);
		cb->kernel_cb.status = status;
		mutex_lock(&client->event_q_lock);
		list_add_tail(&cb->node, &client->event_q);
		mutex_unlock(&client->event_q_lock);
		wake_up_all(&client->event_wq);
	} else {
		dprintk(SYNX_ERR, "invalid params\n");
	}
}

void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
{
	struct synx_cb_data *synx_cb, *synx_cb_temp;

	if (IS_ERR_OR_NULL(synx_obj)) {
		dprintk(SYNX_ERR, "invalid arguments\n");
		return;
	}

	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		synx_cb->status = status;
		list_del_init(&synx_cb->node);
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback\n");
	}
}

void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
{
	struct synx_cb_data *synx_cb =
		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
	struct synx_client *client;
	struct synx_client_cb *cb;
	struct synx_kernel_payload payload;
	u32 status;

	client = synx_get_client(synx_cb->session);
	if (IS_ERR_OR_NULL(client)) {
		dprintk(SYNX_ERR,
			"invalid session data %pK in cb payload\n",
			synx_cb->session);
		goto free;
	}

	if (synx_cb->idx == 0 ||
		synx_cb->idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] invalid cb index %u\n",
			client->id, synx_cb->idx);
		goto fail;
	}

	status = synx_cb->status;
	cb = &client->cb_table[synx_cb->idx];
	if (!cb->is_valid) {
		dprintk(SYNX_ERR, "invalid cb payload\n");
		goto fail;
	}

	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
	payload.status = status;

	if (payload.cb_func == synx_util_default_user_callback) {
		/*
		 * need to send client cb data for default
		 * user cb (userspace cb)
		 */
		payload.data = cb;
	} else {
		/*
		 * clear the cb entry. userspace cb entry
		 * will be cleared after data read by the
		 * polling thread or when client is destroyed
		 */
		if (synx_util_clear_cb_entry(client, cb))
			dprintk(SYNX_ERR,
				"[sess :%llu] error clearing cb entry\n",
				client->id);
	}

	dprintk(SYNX_DBG,
		"callback dispatched for handle %u, status %u, data %pK\n",
		payload.h_synx, payload.status, payload.data);

	/* dispatch kernel callback */
	payload.cb_func(payload.h_synx,
		payload.status, payload.data);

fail:
	synx_put_client(client);
free:
	kfree(synx_cb);
}

int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences)
{
	int rc = SYNX_SUCCESS;
	int i = 0, handle_count = 0;
	u32 h_child = 0;
	struct dma_fence_array *array = NULL;
	struct synx_coredata **synx_datas = NULL;
	struct synx_map_entry *fence_entry = NULL;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences))
		return -SYNX_INVALID;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return -SYNX_INVALID;

		synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
		if (IS_ERR_OR_NULL(synx_datas))
			return -SYNX_NOMEM;

		for (i = 0; i < array->num_fences; i++) {
			h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
			fence_entry = synx_util_get_map_entry(h_child);
			if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) {
				dprintk(SYNX_ERR, "Invalid handle access %u\n", h_child);
				rc = -SYNX_NOENT;
				goto fail;
			}

			synx_datas[handle_count++] = fence_entry->synx_obj;
			synx_util_release_map_entry(fence_entry);
		}
	}

	*child_synx_obj = synx_datas;
	*num_fences = handle_count;
	return rc;

fail:
	kfree(synx_datas);
	return rc;
}

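/*
 * Summary of the function below: reverse lookup from a dma-fence pointer
 * (used as the hash key) to the synx handle recorded for it. When a
 * global handle is requested but not present, the local handle is
 * returned as a fallback; 0 means no entry was found.
 */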
u32 synx_util_get_fence_entry(u64 key, u32 global)
{
	u32 h_synx = 0;
	struct synx_fence_entry *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			if (global)
				h_synx = curr->g_handle;
			/* return local handle if global not available */
			if (h_synx == 0)
				h_synx = curr->l_handle;
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return h_synx;
}

void synx_util_release_fence_entry(u64 key)
{
	struct synx_fence_entry *entry = NULL, *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			entry = curr;
			break;
		}
	}
	if (entry) {
		hash_del(&entry->node);
		dprintk(SYNX_MEM,
			"released fence entry %pK for fence %pK\n",
			entry, (void *)key);
		kfree(entry);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);
}

int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
	u32 *h_synx, u32 global)
{
	int rc = SYNX_SUCCESS;
	struct synx_fence_entry *curr;

	if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
		return -SYNX_INVALID;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, entry->key) {
		/* raced with import from another process on same fence */
		if (curr->key == entry->key) {
			if (global)
				*h_synx = curr->g_handle;

			if (*h_synx == 0 || !global)
				*h_synx = curr->l_handle;

			rc = -SYNX_ALREADY;
			break;
		}
	}
	/* add entry only if it is not already present in the map */
	if (rc == SYNX_SUCCESS) {
		hash_add(synx_dev->native->fence_map,
			&entry->node, entry->key);
		dprintk(SYNX_MEM,
			"added fence entry %pK for fence %pK\n",
			entry, (void *)entry->key);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return rc;
}

struct synx_client *synx_get_client(struct synx_session *session)
{
	struct synx_client *client = NULL;
	struct synx_client *curr;

	if (IS_ERR_OR_NULL(session))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	hash_for_each_possible(synx_dev->native->client_metadata_map,
		curr, node, (u64)session) {
		if (curr == (struct synx_client *)session) {
			if (curr->active) {
				kref_get(&curr->refcount);
				client = curr;
			}
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);

	return client;
}

static void synx_client_cleanup(struct work_struct *dispatch)
{
	int i, j;
	struct synx_client *client =
		container_of(dispatch, struct synx_client, dispatch);
	struct synx_handle_coredata *curr;
	struct hlist_node *tmp;

	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
		client->id, client->name);

	/*
	 * go over all the remaining synx obj handles
	 * un-released from this session and remove them.
	 */
	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
		dprintk(SYNX_WARN,
			"[sess :%llu] un-released handle %u\n",
			client->id, curr->key);
		j = kref_read(&curr->refcount);
		/* release pending reference */
		while (j--)
			kref_put(&curr->refcount, synx_util_destroy_handle);
	}

	mutex_destroy(&client->event_q_lock);

	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
		client->id, client->name, client);
	vfree(client);
}

static void synx_client_destroy(struct kref *kref)
{
	struct synx_client *client =
		container_of(kref, struct synx_client, refcount);

	hash_del(&client->node);
	INIT_WORK(&client->dispatch, synx_client_cleanup);
	queue_work(synx_dev->wq_cleanup, &client->dispatch);
}

void synx_put_client(struct synx_client *client)
{
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	kref_put(&client->refcount, synx_client_destroy);
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
}

void synx_util_generate_timestamp(char *timestamp, size_t size)
{
	struct timespec64 tv;
	struct tm tm;

	ktime_get_real_ts64(&tv);
	time64_to_tm(tv.tv_sec, 0, &tm);
	snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
		tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
		tm.tm_min, tm.tm_sec);
}

void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
{
	struct error_node *err_node;

	if (!synx_dev->debugfs_root)
		return;

	err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return;

	err_node->client_id = client_id;
	err_node->error_code = err;
	err_node->h_synx = h_synx;
	synx_util_generate_timestamp(err_node->timestamp,
		sizeof(err_node->timestamp));
	mutex_lock(&synx_dev->error_lock);
	list_add(&err_node->node,
		&synx_dev->error_list);
	mutex_unlock(&synx_dev->error_lock);
}

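/*
 * Summary of the function below: record external (CSL) fence bookkeeping
 * in csl_fence_map, keyed on the 32-bit fence value. The entry is added
 * only if the key is not already present; a duplicate returns
 * -SYNX_ALREADY, and other table types are rejected as invalid.
 */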
int synx_util_save_data(void *fence, u32 flags,
	u32 h_synx)
{
	int rc = SYNX_SUCCESS;
	struct synx_entry_64 *entry = NULL, *curr;
	u64 key;
	u32 tbl = synx_util_map_params_to_type(flags);

	switch (tbl) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		/* ensure fence is not already added to map */
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				rc = -SYNX_ALREADY;
				break;
			}
		}
		if (rc == SYNX_SUCCESS) {
			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
			if (entry) {
				entry->data[0] = h_synx;
				entry->key = key;
				kref_init(&entry->refcount);
				hash_add(synx_dev->native->csl_fence_map,
					&entry->node, entry->key);
				dprintk(SYNX_MEM, "added csl fence %d to map %pK\n",
					entry->key, entry);
			} else {
				rc = -SYNX_NOMEM;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		/* no entry has been allocated on this path, nothing to free */
		dprintk(SYNX_ERR, "invalid hash table selection\n");
		rc = -SYNX_INVALID;
	}

	return rc;
}

struct synx_entry_64 *synx_util_retrieve_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				kref_get(&curr->refcount);
				entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}

	return entry;
}

static void synx_util_destroy_data(struct kref *kref)
{
	struct synx_entry_64 *entry =
		container_of(kref, struct synx_entry_64, refcount);

	hash_del(&entry->node);
	dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
		entry->key, entry);
	kfree(entry);
}

void synx_util_remove_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	if (IS_ERR_OR_NULL(fence))
		return;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *((u32 *)fence);
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				entry = curr;
				break;
			}
		}
		if (entry)
			kref_put(&entry->refcount, synx_util_destroy_data);
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}
}

void synx_util_map_import_params_to_create(
	struct synx_import_indv_params *params,
	struct synx_create_params *c_params)
{
	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
		return;

	if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
		c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;

	if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
		c_params->flags |= SYNX_CREATE_LOCAL_FENCE;

	if (params->flags & SYNX_IMPORT_DMA_FENCE)
		c_params->flags |= SYNX_CREATE_DMA_FENCE;
}

u32 synx_util_map_client_id_to_core(
	enum synx_client_id id)
{
	u32 core_id;

	switch (id) {
	case SYNX_CLIENT_NATIVE:
		core_id = SYNX_CORE_APSS; break;
	case SYNX_CLIENT_ICP_CTX0:
		core_id = SYNX_CORE_ICP; break;
	case SYNX_CLIENT_EVA_CTX0:
		core_id = SYNX_CORE_EVA; break;
	case SYNX_CLIENT_VID_CTX0:
		core_id = SYNX_CORE_IRIS; break;
	case SYNX_CLIENT_NSP_CTX0:
		core_id = SYNX_CORE_NSP; break;
	default:
		core_id = SYNX_CORE_MAX;
	}

	return core_id;
}