synx_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>

#include "synx_debugfs.h"
#include "synx_util.h"
#include "synx_private.h"

extern void synx_external_callback(s32 sync_obj, int status, void *data);

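/* Initialize the coredata backing a newly created synx object: allocate (or
 * take a reference on an existing) global handle or a local handle, set up
 * refcounting and the registered-callback list, and either adopt the
 * caller-supplied dma-fence or allocate and initialize a backing fence along
 * with its fence-map entry.
 */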
int synx_util_init_coredata(struct synx_coredata *synx_obj,
	struct synx_create_params *params,
	struct dma_fence_ops *ops,
	u64 dma_context)
{
	int rc = -SYNX_INVALID;
	spinlock_t *fence_lock;
	struct dma_fence *fence;
	struct synx_fence_entry *entry;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
		IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
		return -SYNX_INVALID;

	if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
		*params->h_synx != 0) {
		rc = synx_global_get_ref(
			synx_util_global_idx(*params->h_synx));
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_synx);
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else {
		rc = synx_alloc_local_handle(params->h_synx);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	synx_obj->map_count = 1;
	synx_obj->num_bound_synxs = 0;
	synx_obj->type |= params->flags;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	if (params->name)
		strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));

	if (params->flags & SYNX_CREATE_DMA_FENCE) {
		fence = (struct dma_fence *)params->fence;
		if (IS_ERR_OR_NULL(fence)) {
			dprintk(SYNX_ERR, "invalid external fence\n");
			goto free;
		}

		dma_fence_get(fence);
		synx_obj->fence = fence;
	} else {
		/*
		 * lock and fence memory will be released in fence
		 * release function
		 */
		fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence_lock)) {
			rc = -SYNX_NOMEM;
			goto free;
		}

		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence)) {
			kfree(fence_lock);
			rc = -SYNX_NOMEM;
			goto free;
		}

		spin_lock_init(fence_lock);
		dma_fence_init(fence, ops, fence_lock, dma_context, 1);
		synx_obj->fence = fence;
		synx_util_activate(synx_obj);
		dprintk(SYNX_MEM,
			"allocated backing fence %pK\n", fence);

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (IS_ERR_OR_NULL(entry)) {
			rc = -SYNX_NOMEM;
			goto clean;
		}

		entry->key = (u64)fence;
		if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
			entry->g_handle = *params->h_synx;
		else
			entry->l_handle = *params->h_synx;

		rc = synx_util_insert_fence_entry(entry,
			params->h_synx,
			params->flags & SYNX_CREATE_GLOBAL_FENCE);
		BUG_ON(rc != SYNX_SUCCESS);
	}

	if (rc != SYNX_SUCCESS)
		goto clean;

	synx_obj->status = synx_util_get_object_status(synx_obj);
	return SYNX_SUCCESS;

clean:
	dma_fence_put(fence);
free:
	if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
		synx_global_put_ref(
			synx_util_global_idx(*params->h_synx));
	else
		clear_bit(synx_util_global_idx(*params->h_synx),
			synx_dev->native->bitmap);

	return rc;
}

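/* Register a dma-fence callback so the synx framework is notified when the
 * backing fence is signaled natively; takes a coredata reference on behalf
 * of the callback and handles the already-signaled (-ENOENT) case.
 */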
int synx_util_add_callback(struct synx_coredata *synx_obj,
	u32 h_synx)
{
	int rc;
	struct synx_signal_cb *signal_cb;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
	if (IS_ERR_OR_NULL(signal_cb))
		return -SYNX_NOMEM;

	signal_cb->handle = h_synx;
	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
	signal_cb->synx_obj = synx_obj;

	/* get reference on synx coredata for signal cb */
	synx_util_get_object(synx_obj);

	/*
	 * adding callback enables synx framework to
	 * get notified on signal from clients using
	 * native dma fence operations.
	 */
	rc = dma_fence_add_callback(synx_obj->fence,
		&signal_cb->fence_cb, synx_fence_callback);
	if (rc != 0) {
		if (rc == -ENOENT) {
			if (synx_util_is_global_object(synx_obj)) {
				/* signal (if) global handle */
				rc = synx_global_update_status(
					synx_obj->global_idx,
					synx_util_get_object_status(synx_obj));
				if (rc != SYNX_SUCCESS)
					dprintk(SYNX_ERR,
						"status update of %u with fence %pK\n",
						synx_obj->global_idx, synx_obj->fence);
			} else {
				rc = SYNX_SUCCESS;
			}
		} else {
			dprintk(SYNX_ERR,
				"error adding callback for %pK err %d\n",
				synx_obj->fence, rc);
		}

		synx_util_put_object(synx_obj);
		kfree(signal_cb);
		return rc;
	}

	synx_obj->signal_cb = signal_cb;
	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
		signal_cb, synx_obj->fence);
	return SYNX_SUCCESS;
}

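/* Initialize coredata for a merged (group) synx object: allocate the merged
 * handle and wrap the supplied fences in a dma_fence_array.
 */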
int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	struct synx_merge_params *params,
	u32 num_objs,
	u64 dma_context)
{
	int rc;
	struct dma_fence_array *array;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_merged_obj);
		synx_obj->global_idx =
			synx_util_global_idx(*params->h_merged_obj);
	} else {
		rc = synx_alloc_local_handle(params->h_merged_obj);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	array = dma_fence_array_create(num_objs, fences,
		dma_context, 1, false);
	if (IS_ERR_OR_NULL(array))
		return -SYNX_INVALID;

	synx_obj->fence = &array->base;
	synx_obj->map_count = 1;
	synx_obj->type = params->flags;
	synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
	synx_obj->num_bound_synxs = 0;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	synx_obj->status = synx_util_get_object_status(synx_obj);
	synx_util_activate(synx_obj);
	return rc;
}

void synx_util_destroy_coredata(struct kref *kref)
{
	int rc;
	struct synx_coredata *synx_obj =
		container_of(kref, struct synx_coredata, refcount);

	if (synx_util_is_global_object(synx_obj)) {
		rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
		if (rc)
			dprintk(SYNX_ERR, "Failed to clear subscribers");
		synx_global_put_ref(synx_obj->global_idx);
	}

	synx_util_object_destroy(synx_obj);
}

void synx_util_get_object(struct synx_coredata *synx_obj)
{
	kref_get(&synx_obj->refcount);
}

void synx_util_put_object(struct synx_coredata *synx_obj)
{
	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
}

int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status)
{
	struct dma_fence_array *array = NULL;
	u32 i;
	int rc = 0;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
		return -SYNX_INVALID;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return -SYNX_INVALID;

		for (i = 0; i < array->num_fences; i++) {
			if (kref_read(&array->fences[i]->refcount) == 1 &&
				__fence_state(array->fences[i], false) == SYNX_STATE_ACTIVE) {
				dma_fence_set_error(array->fences[i],
					-SYNX_STATE_SIGNALED_CANCEL);
				rc = dma_fence_signal(array->fences[i]);
				if (rc)
					dprintk(SYNX_ERR,
						"signaling child fence %pK failed=%d\n",
						array->fences[i], rc);
			}
			dma_fence_put(array->fences[i]);
		}
	}
	return rc;
}

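/* Final teardown of a synx coredata object: cancel undispatched callbacks,
 * deregister external bindings, drop the fence-map entry and, if the fence
 * is still active with the last reference held locally, signal it with a
 * cancellation error before releasing it.
 */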
void synx_util_object_destroy(struct synx_coredata *synx_obj)
{
	int rc;
	u32 i;
	s32 sync_id;
	u32 type;
	unsigned long flags;
	struct synx_cb_data *synx_cb, *synx_cb_temp;
	struct synx_bind_desc *bind_desc;
	struct bind_operations *bind_ops;
	struct synx_external_data *data;

	/* clear all the undispatched callbacks */
	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		dprintk(SYNX_ERR,
			"dispatching un-released callbacks of session %pK\n",
			synx_cb->session);
		synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
			dprintk(SYNX_VERB,
				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
				synx_cb, synx_cb->timeout);
			del_timer(&synx_cb->synx_timer);
		}
		list_del_init(&synx_cb->node);
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback for fence %pK\n", synx_obj->fence);
	}

	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
		bind_desc = &synx_obj->bound_synxs[i];
		sync_id = bind_desc->external_desc.id;
		type = bind_desc->external_desc.type;
		data = bind_desc->external_data;
		bind_ops = synx_util_get_bind_ops(type);
		if (IS_ERR_OR_NULL(bind_ops)) {
			dprintk(SYNX_ERR,
				"bind ops fail id: %d, type: %u, err: %d\n",
				sync_id, type, rc);
			continue;
		}

		/* clear the hash table entry */
		synx_util_remove_data(&sync_id, type);

		rc = bind_ops->deregister_callback(
			synx_external_callback, data, sync_id);
		if (rc < 0) {
			dprintk(SYNX_ERR,
				"de-registration fail id: %d, type: %u, err: %d\n",
				sync_id, type, rc);
			continue;
		}

		/*
		 * release the memory allocated for external data.
		 * It is safe to release this memory
		 * only if deregistration is successful.
		 */
		kfree(data);
	}

	mutex_destroy(&synx_obj->obj_lock);
	synx_util_release_fence_entry((u64)synx_obj->fence);

	/* dma fence framework expects handles are signaled before release,
	 * so signal if active handle and has last refcount. Synx handles
	 * on other cores are still active to carry out usual callflow.
	 */
	if (!IS_ERR_OR_NULL(synx_obj->fence)) {
		spin_lock_irqsave(synx_obj->fence->lock, flags);
		if (synx_util_is_merged_object(synx_obj) &&
			synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE)
			rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL);
		else if (kref_read(&synx_obj->fence->refcount) == 1 &&
			(synx_util_get_object_status_locked(synx_obj) ==
			SYNX_STATE_ACTIVE)) {
			/* set fence error to cancel */
			dma_fence_set_error(synx_obj->fence,
				-SYNX_STATE_SIGNALED_CANCEL);
			rc = dma_fence_signal_locked(synx_obj->fence);
		}
		spin_unlock_irqrestore(synx_obj->fence->lock, flags);
		if (rc)
			dprintk(SYNX_ERR,
				"signaling fence %pK failed=%d\n",
				synx_obj->fence, rc);
	}

	dma_fence_put(synx_obj->fence);
	kfree(synx_obj);
	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
}

long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
{
	bool bit;
	long idx;

	do {
		idx = find_first_zero_bit(bitmap, size);
		if (idx >= size)
			break;
		bit = test_and_set_bit(idx, bitmap);
	} while (bit);

	return idx;
}

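/* Encode a synx handle from the object index: the low SYNX_HANDLE_INDEX_BITS
 * carry the index, the next SYNX_HANDLE_CORE_BITS carry the core id, and the
 * bit above that marks the handle as global.
 */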
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
{
	u32 handle = 0;

	if (idx >= SYNX_MAX_OBJS)
		return 0;

	if (global_idx) {
		handle = 1;
		handle <<= SYNX_HANDLE_CORE_BITS;
	}
	handle |= core_id;
	handle <<= SYNX_HANDLE_INDEX_BITS;
	handle |= idx;

	return handle;
}

int synx_alloc_global_handle(u32 *new_synx)
{
	int rc;
	u32 idx;

	rc = synx_global_alloc_index(&idx);
	if (rc != SYNX_SUCCESS)
		return rc;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
	dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
		*new_synx, *new_synx);

	rc = synx_global_init_coredata(*new_synx);
	return rc;
}

int synx_alloc_local_handle(u32 *new_synx)
{
	u32 idx;

	idx = synx_util_get_free_handle(synx_dev->native->bitmap,
		SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS)
		return -SYNX_NOMEM;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
	dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
		*new_synx, *new_synx);
	return SYNX_SUCCESS;
}

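/* Add a per-client handle entry to the client's handle map, taking an extra
 * reference if the same handle is already mapped to the same coredata.
 */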
int synx_util_init_handle(struct synx_client *client,
	struct synx_coredata *synx_obj, u32 *new_h_synx,
	void *map_entry)
{
	int rc = SYNX_SUCCESS;
	bool found = false;
	struct synx_handle_coredata *synx_data, *curr;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
		IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
		return -SYNX_INVALID;

	synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
	if (IS_ERR_OR_NULL(synx_data))
		return -SYNX_NOMEM;

	synx_data->client = client;
	synx_data->synx_obj = synx_obj;
	synx_data->key = *new_h_synx;
	synx_data->map_entry = map_entry;
	kref_init(&synx_data->refcount);
	synx_data->rel_count = 1;

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		curr, node, *new_h_synx) {
		if (curr->key == *new_h_synx) {
			if (curr->synx_obj != synx_obj) {
				rc = -SYNX_INVALID;
				dprintk(SYNX_ERR,
					"inconsistent data in handle map\n");
			} else {
				kref_get(&curr->refcount);
				curr->rel_count++;
			}
			found = true;
			break;
		}
	}
	if (unlikely(found))
		kfree(synx_data);
	else
		hash_add(client->handle_map,
			&synx_data->node, *new_h_synx);
	spin_unlock_bh(&client->handle_map_lock);

	return rc;
}

int synx_util_activate(struct synx_coredata *synx_obj)
{
	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	/* move synx to ACTIVE state and register cb for merged object */
	dma_fence_enable_sw_signaling(synx_obj->fence);
	return 0;
}

static u32 synx_util_get_references(struct synx_coredata *synx_obj)
{
	u32 count = 0;
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	/* obtain dma fence reference */
	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_get(array->fences[i]);
		count = array->num_fences;
	} else {
		dma_fence_get(synx_obj->fence);
		count = 1;
	}

	return count;
}

static void synx_util_put_references(struct synx_coredata *synx_obj)
{
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_put(array->fences[i]);
	} else {
		dma_fence_put(synx_obj->fence);
	}
}

static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	u32 idx)
{
	struct dma_fence_array *array = NULL;
	u32 i = 0;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			fences[idx+i] = array->fences[i];

		return array->num_fences;
	}

	fences[idx] = synx_obj->fence;
	return 1;
}

static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
{
	int i, j;
	u32 wr_idx = 1;

	if (IS_ERR_OR_NULL(arr)) {
		dprintk(SYNX_ERR, "invalid input array\n");
		return 0;
	}

	for (i = 1; i < num; i++) {
		for (j = 0; j < wr_idx; j++) {
			if (arr[i] == arr[j]) {
				/* release reference obtained for duplicate */
				dprintk(SYNX_DBG,
					"releasing duplicate reference\n");
				dma_fence_put(arr[i]);
				break;
			}
		}
		if (j == wr_idx)
			arr[wr_idx++] = arr[i];
	}

	return wr_idx;
}

s32 synx_util_merge_error(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs)
{
	u32 i = 0;
	struct synx_handle_coredata *synx_data;
	struct synx_coredata *synx_obj;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
		return -SYNX_INVALID;

	for (i = 0; i < num_objs; i++) {
		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
		synx_obj = synx_util_obtain_object(synx_data);
		if (IS_ERR_OR_NULL(synx_obj) ||
			IS_ERR_OR_NULL(synx_obj->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in cleanup\n",
				client->id, h_synxs[i]);
			continue;
		}

		/* release all references obtained during merge validation */
		synx_util_put_references(synx_obj);
		synx_util_release_handle(synx_data);
	}

	return 0;
}

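/* Validate the handles passed to a merge request: acquire each handle, take
 * references on all underlying fences, flatten them into a single fence
 * array and strip duplicates. On success the caller owns the returned fence
 * list and the fence references.
 */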
int synx_util_validate_merge(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs,
	struct dma_fence ***fence_list,
	u32 *fence_cnt)
{
	u32 count = 0;
	u32 i = 0;
	struct synx_handle_coredata **synx_datas;
	struct synx_coredata **synx_objs;
	struct dma_fence **fences = NULL;

	if (num_objs <= 1) {
		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
		return -SYNX_INVALID;
	}

	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_datas))
		return -SYNX_NOMEM;

	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_objs)) {
		kfree(synx_datas);
		return -SYNX_NOMEM;
	}

	for (i = 0; i < num_objs; i++) {
		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
		if (IS_ERR_OR_NULL(synx_objs[i]) ||
			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in merge list\n",
				client->id, h_synxs[i]);
			*fence_cnt = i;
			goto error;
		}
		count += synx_util_get_references(synx_objs[i]);
	}

	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
	if (IS_ERR_OR_NULL(fences)) {
		*fence_cnt = num_objs;
		goto error;
	}

	/* memory will be released later in the invoking function */
	*fence_list = fences;

	count = 0;
	for (i = 0; i < num_objs; i++) {
		count += synx_util_add_fence(synx_objs[i], fences, count);
		/* release the reference obtained earlier in the function */
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = synx_util_remove_duplicates(fences, count);
	kfree(synx_objs);
	kfree(synx_datas);
	return 0;

error:
	/* release the reference/s obtained earlier in the function */
	for (i = 0; i < *fence_cnt; i++) {
		synx_util_put_references(synx_objs[i]);
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = 0;
	kfree(synx_objs);
	kfree(synx_datas);
	return -SYNX_INVALID;
}

u32 __fence_state(struct dma_fence *fence, bool locked)
{
	s32 status;
	u32 state = SYNX_STATE_INVALID;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	if (locked)
		status = dma_fence_get_status_locked(fence);
	else
		status = dma_fence_get_status(fence);

	/* convert fence status to synx state */
	switch (status) {
	case 0:
		state = SYNX_STATE_ACTIVE;
		break;
	case 1:
		state = SYNX_STATE_SIGNALED_SUCCESS;
		break;
	case -SYNX_STATE_SIGNALED_CANCEL:
		state = SYNX_STATE_SIGNALED_CANCEL;
		break;
	case -SYNX_STATE_SIGNALED_EXTERNAL:
		state = SYNX_STATE_SIGNALED_EXTERNAL;
		break;
	case -SYNX_STATE_SIGNALED_ERROR:
		state = SYNX_STATE_SIGNALED_ERROR;
		break;
	default:
		state = (u32)(-status);
	}

	return state;
}

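/* Derive the aggregate state of a fence array: ACTIVE while any child fence
 * is still active, otherwise the first error state encountered (or the last
 * child's signaled state when no child reported an error).
 */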
static u32 __fence_group_state(struct dma_fence *fence, bool locked)
{
	u32 i = 0;
	u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID;
	struct dma_fence_array *array = NULL;
	u32 intr, actv_cnt, sig_cnt, err_cnt;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	actv_cnt = sig_cnt = err_cnt = 0;
	array = to_dma_fence_array(fence);
	if (IS_ERR_OR_NULL(array))
		return SYNX_STATE_INVALID;

	for (i = 0; i < array->num_fences; i++) {
		intr = __fence_state(array->fences[i], locked);
		if (err_cnt == 0)
			parent_state = intr;
		switch (intr) {
		case SYNX_STATE_ACTIVE:
			actv_cnt++;
			break;
		case SYNX_STATE_SIGNALED_SUCCESS:
			sig_cnt++;
			break;
		default:
			intr > SYNX_STATE_SIGNALED_MAX ? sig_cnt++ : err_cnt++;
		}
	}

	dprintk(SYNX_DBG,
		"group cnt stats act:%u, sig: %u, err: %u\n",
		actv_cnt, sig_cnt, err_cnt);

	if (actv_cnt)
		state = SYNX_STATE_ACTIVE;
	else
		state = parent_state;

	return state;
}

/*
 * WARN: Should not hold the fence spinlock when invoking
 * this function. Use synx_fence_state_locked instead
 */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, false);
	else
		state = __fence_state(synx_obj->fence, false);

	return state;
}

/* use this for status check when holding on to metadata spinlock */
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, true);
	else
		state = __fence_state(synx_obj->fence, true);

	return state;
}

struct synx_handle_coredata *synx_util_acquire_handle(
	struct synx_client *client, u32 h_synx)
{
	struct synx_handle_coredata *synx_data = NULL;
	struct synx_handle_coredata *synx_handle =
		ERR_PTR(-SYNX_NOENT);

	if (IS_ERR_OR_NULL(client))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		synx_data, node, h_synx) {
		if (synx_data->key == h_synx &&
			synx_data->rel_count != 0) {
			kref_get(&synx_data->refcount);
			synx_handle = synx_data;
			break;
		}
	}
	spin_unlock_bh(&client->handle_map_lock);

	return synx_handle;
}

struct synx_map_entry *synx_util_insert_to_map(
	struct synx_coredata *synx_obj,
	u32 h_synx, u32 flags)
{
	struct synx_map_entry *map_entry;

	map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
	if (IS_ERR_OR_NULL(map_entry))
		return ERR_PTR(-SYNX_NOMEM);

	kref_init(&map_entry->refcount);
	map_entry->synx_obj = synx_obj;
	map_entry->flags = flags;
	map_entry->key = h_synx;

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_add(synx_dev->native->global_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->global_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to global map %pK\n",
			h_synx, map_entry);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_add(synx_dev->native->local_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->local_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to local map %pK\n",
			h_synx, map_entry);
	}

	return map_entry;
}

struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
{
	struct synx_map_entry *curr;
	struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);

	if (h_synx == 0)
		return ERR_PTR(-SYNX_INVALID);

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_for_each_possible(synx_dev->native->global_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->global_map_lock);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_for_each_possible(synx_dev->native->local_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->local_map_lock);
	}

	/* should we allocate if entry not found? */
	return map_entry;
}

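/* Called when a map entry is destroyed: drop the mapping count and, when the
 * last local mapping goes away while the fence is still active, either signal
 * the fence with the already-available global status, hand the signal cb over
 * to the global handle so cross-core waiters are still notified, or remove
 * the dma-fence callback entirely for purely local objects.
 */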
static void synx_util_cleanup_fence(
	struct synx_coredata *synx_obj)
{
	struct synx_signal_cb *signal_cb;
	unsigned long flags;
	u32 g_status;
	u32 f_status;
	u32 h_synx = 0;

	mutex_lock(&synx_obj->obj_lock);
	synx_obj->map_count--;
	signal_cb = synx_obj->signal_cb;
	f_status = synx_util_get_object_status(synx_obj);
	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
	if (synx_obj->map_count == 0 &&
		(signal_cb != NULL) &&
		(synx_obj->global_idx != 0) &&
		(f_status == SYNX_STATE_ACTIVE)) {
		/*
		 * no more clients interested for notification
		 * on handle on local core.
		 * remove reference held by callback on synx
		 * coredata structure and update cb (if still
		 * un-signaled) with global handle idx to
		 * notify any cross-core clients waiting on
		 * handle.
		 */
		g_status = synx_global_get_status(synx_obj->global_idx);
		if (g_status > SYNX_STATE_ACTIVE) {
			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
				synx_obj->fence, g_status);
			synx_native_signal_fence(synx_obj, g_status);
		} else {
			spin_lock_irqsave(synx_obj->fence->lock, flags);
			if (synx_util_get_object_status_locked(synx_obj) ==
				SYNX_STATE_ACTIVE) {
				signal_cb->synx_obj = NULL;
				synx_global_fetch_handle_details(synx_obj->global_idx, &h_synx);
				signal_cb->handle = h_synx;
				synx_obj->signal_cb = NULL;
				/*
				 * release reference held by signal cb and
				 * get reference on global index instead.
				 */
				synx_util_put_object(synx_obj);
				synx_global_get_ref(synx_obj->global_idx);
			}
			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
		}
	} else if (synx_obj->map_count == 0 && signal_cb &&
		(f_status == SYNX_STATE_ACTIVE)) {
		if (dma_fence_remove_callback(synx_obj->fence,
			&signal_cb->fence_cb)) {
			kfree(signal_cb);
			synx_obj->signal_cb = NULL;
			/* release reference held by signal cb */
			synx_util_put_object(synx_obj);
			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
				synx_obj->signal_cb);
		}
	}
	mutex_unlock(&synx_obj->obj_lock);
}

static void synx_util_destroy_map_entry_worker(
	struct work_struct *dispatch)
{
	struct synx_map_entry *map_entry =
		container_of(dispatch, struct synx_map_entry, dispatch);
	struct synx_coredata *synx_obj;

	synx_obj = map_entry->synx_obj;
	if (!IS_ERR_OR_NULL(synx_obj)) {
		synx_util_cleanup_fence(synx_obj);
		/* release reference held by map entry */
		synx_util_put_object(synx_obj);
	}

	if (!synx_util_is_global_handle(map_entry->key))
		clear_bit(synx_util_global_idx(map_entry->key),
			synx_dev->native->bitmap);
	dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
		map_entry->key, map_entry);
	kfree(map_entry);
}

void synx_util_destroy_map_entry(struct kref *kref)
{
	struct synx_map_entry *map_entry =
		container_of(kref, struct synx_map_entry, refcount);

	hash_del(&map_entry->node);
	dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
		map_entry->key, map_entry);
	INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
	queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
}

void synx_util_release_map_entry(struct synx_map_entry *map_entry)
{
	spinlock_t *lock;

	if (IS_ERR_OR_NULL(map_entry))
		return;

	if (synx_util_is_global_handle(map_entry->key))
		lock = &synx_dev->native->global_map_lock;
	else
		lock = &synx_dev->native->local_map_lock;

	spin_lock_bh(lock);
	kref_put(&map_entry->refcount,
		synx_util_destroy_map_entry);
	spin_unlock_bh(lock);
}

static void synx_util_destroy_handle_worker(
	struct work_struct *dispatch)
{
	struct synx_handle_coredata *synx_data =
		container_of(dispatch, struct synx_handle_coredata,
		dispatch);

	synx_util_release_map_entry(synx_data->map_entry);
	dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
		synx_data->key, synx_data);
	kfree(synx_data);
}

void synx_util_destroy_handle(struct kref *kref)
{
	struct synx_handle_coredata *synx_data =
		container_of(kref, struct synx_handle_coredata,
		refcount);

	hash_del(&synx_data->node);
	dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
		synx_data->client->id, synx_data->key, synx_data);
	INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
	queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
}

void synx_util_release_handle(struct synx_handle_coredata *synx_data)
{
	struct synx_client *client;

	if (IS_ERR_OR_NULL(synx_data))
		return;

	client = synx_data->client;
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&client->handle_map_lock);
	kref_put(&synx_data->refcount,
		synx_util_destroy_handle);
	spin_unlock_bh(&client->handle_map_lock);
}

struct bind_operations *synx_util_get_bind_ops(u32 type)
{
	struct synx_registered_ops *client_ops;

	if (!synx_util_is_valid_bind_type(type))
		return NULL;

	mutex_lock(&synx_dev->vtbl_lock);
	client_ops = &synx_dev->bind_vtbl[type];
	if (!client_ops->valid) {
		mutex_unlock(&synx_dev->vtbl_lock);
		return NULL;
	}
	mutex_unlock(&synx_dev->vtbl_lock);

	return &client_ops->ops;
}

int synx_util_alloc_cb_entry(struct synx_client *client,
	struct synx_kernel_payload *data,
	u32 *cb_idx)
{
	long idx;
	struct synx_client_cb *cb;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
		IS_ERR_OR_NULL(cb_idx))
		return -SYNX_INVALID;

	idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] free cb index not available\n",
			client->id);
		return -SYNX_NOMEM;
	}

	cb = &client->cb_table[idx];
	memset(cb, 0, sizeof(*cb));
	cb->is_valid = true;
	cb->client = client;
	cb->idx = idx;
	memcpy(&cb->kernel_cb, data,
		sizeof(cb->kernel_cb));

	*cb_idx = idx;
	dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
		client->id, *cb_idx);
	return 0;
}

int synx_util_clear_cb_entry(struct synx_client *client,
	struct synx_client_cb *cb)
{
	int rc = 0;
	u32 idx;

	if (IS_ERR_OR_NULL(cb))
		return -SYNX_INVALID;

	idx = cb->idx;
	memset(cb, 0, sizeof(*cb));
	if (idx && idx < SYNX_MAX_OBJS) {
		clear_bit(idx, client->cb_bitmap);
	} else {
		dprintk(SYNX_ERR, "invalid index\n");
		rc = -SYNX_INVALID;
	}

	return rc;
}

void synx_util_default_user_callback(u32 h_synx,
	int status, void *data)
{
	struct synx_client_cb *cb = data;
	struct synx_client *client = NULL;

	if (cb && cb->client) {
		client = cb->client;
		dprintk(SYNX_VERB,
			"[sess :%llu] user cb queued for handle %d\n",
			client->id, h_synx);
		cb->kernel_cb.status = status;
		mutex_lock(&client->event_q_lock);
		list_add_tail(&cb->node, &client->event_q);
		mutex_unlock(&client->event_q_lock);
		wake_up_all(&client->event_wq);
	} else {
		dprintk(SYNX_ERR, "invalid params\n");
	}
}

void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
{
	struct synx_cb_data *synx_cb, *synx_cb_temp;

	if (IS_ERR_OR_NULL(synx_obj)) {
		dprintk(SYNX_ERR, "invalid arguments\n");
		return;
	}

	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		synx_cb->status = status;
		if (synx_cb->timeout != SYNX_NO_TIMEOUT) {
			dprintk(SYNX_VERB,
				"Deleting timer synx_cb 0x%x, timeout 0x%llx\n",
				synx_cb, synx_cb->timeout);
			del_timer(&synx_cb->synx_timer);
		}
		list_del_init(&synx_cb->node);
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback\n");
	}
}

void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
{
	struct synx_cb_data *synx_cb =
		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
	struct synx_client *client;
	struct synx_client_cb *cb;
	struct synx_kernel_payload payload;
	u32 status;

	client = synx_get_client(synx_cb->session);
	if (IS_ERR_OR_NULL(client)) {
		dprintk(SYNX_ERR,
			"invalid session data %pK in cb payload\n",
			synx_cb->session);
		goto free;
	}

	if (synx_cb->idx == 0 ||
		synx_cb->idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] invalid cb index %u\n",
			client->id, synx_cb->idx);
		goto fail;
	}

	status = synx_cb->status;
	cb = &client->cb_table[synx_cb->idx];
	if (!cb->is_valid) {
		dprintk(SYNX_ERR, "invalid cb payload\n");
		goto fail;
	}

	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
	payload.status = status;

	if (payload.cb_func == synx_util_default_user_callback) {
		/*
		 * need to send client cb data for default
		 * user cb (userspace cb)
		 */
		payload.data = cb;
	} else {
		/*
		 * clear the cb entry. userspace cb entry
		 * will be cleared after data read by the
		 * polling thread or when client is destroyed
		 */
		if (synx_util_clear_cb_entry(client, cb))
			dprintk(SYNX_ERR,
				"[sess :%llu] error clearing cb entry\n",
				client->id);
	}

	dprintk(SYNX_DBG,
		"callback dispatched for handle %u, status %u, data %pK\n",
		payload.h_synx, payload.status, payload.data);

	/* dispatch kernel callback */
	payload.cb_func(payload.h_synx,
		payload.status, payload.data);

fail:
	synx_put_client(client);
free:
	kfree(synx_cb);
}

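/* Collect the coredata objects backing each child fence of a merged object
 * by looking the child fences up in the fence and handle maps. The returned
 * array is allocated here and owned by the caller.
 */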
int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences)
{
	int rc = SYNX_SUCCESS;
	int i = 0, handle_count = 0;
	u32 h_child = 0;
	struct dma_fence_array *array = NULL;
	struct synx_coredata **synx_datas = NULL;
	struct synx_map_entry *fence_entry = NULL;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences))
		return -SYNX_INVALID;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return -SYNX_INVALID;

		synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
		if (IS_ERR_OR_NULL(synx_datas))
			return -SYNX_NOMEM;

		for (i = 0; i < array->num_fences; i++) {
			h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
			fence_entry = synx_util_get_map_entry(h_child);
			if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj)) {
				dprintk(SYNX_ERR, "Invalid handle access %u", h_child);
				rc = -SYNX_NOENT;
				goto fail;
			}

			synx_datas[handle_count++] = fence_entry->synx_obj;
			synx_util_release_map_entry(fence_entry);
		}
	}

	*child_synx_obj = synx_datas;
	*num_fences = handle_count;
	return rc;

fail:
	kfree(synx_datas);
	return rc;
}

u32 synx_util_get_fence_entry(u64 key, u32 global)
{
	u32 h_synx = 0;
	struct synx_fence_entry *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			if (global)
				h_synx = curr->g_handle;
			/* return local handle if global not available */
			if (h_synx == 0)
				h_synx = curr->l_handle;
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return h_synx;
}

void synx_util_release_fence_entry(u64 key)
{
	struct synx_fence_entry *entry = NULL, *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			entry = curr;
			break;
		}
	}
	if (entry) {
		hash_del(&entry->node);
		dprintk(SYNX_MEM,
			"released fence entry %pK for fence %pK\n",
			entry, (void *)key);
		kfree(entry);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);
}

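/* Insert a fence-to-handle mapping into the global fence map. If another
 * process raced us and already mapped the same fence, return -SYNX_ALREADY
 * and hand back the existing handle through *h_synx.
 */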
int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
	u32 *h_synx, u32 global)
{
	int rc = SYNX_SUCCESS;
	struct synx_fence_entry *curr;

	if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
		return -SYNX_INVALID;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, entry->key) {
		/* raced with import from another process on same fence */
		if (curr->key == entry->key) {
			if (global)
				*h_synx = curr->g_handle;

			if (*h_synx == 0 || !global)
				*h_synx = curr->l_handle;

			rc = -SYNX_ALREADY;
			break;
		}
	}
	/* add entry only if it's not already present in the map */
	if (rc == SYNX_SUCCESS) {
		hash_add(synx_dev->native->fence_map,
			&entry->node, entry->key);
		dprintk(SYNX_MEM,
			"added fence entry %pK for fence %pK\n",
			entry, (void *)entry->key);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return rc;
}

struct synx_client *synx_get_client(struct synx_session *session)
{
	struct synx_client *client = NULL;
	struct synx_client *curr;

	if (IS_ERR_OR_NULL(session))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	hash_for_each_possible(synx_dev->native->client_metadata_map,
		curr, node, (u64)session) {
		if (curr == (struct synx_client *)session) {
			if (curr->active) {
				kref_get(&curr->refcount);
				client = curr;
			}
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);

	return client;
}

static void synx_client_cleanup(struct work_struct *dispatch)
{
	int i, j;
	struct synx_client *client =
		container_of(dispatch, struct synx_client, dispatch);
	struct synx_handle_coredata *curr;
	struct hlist_node *tmp;

	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
		client->id, client->name);

	/*
	 * go over all the remaining synx obj handles
	 * un-released from this session and remove them.
	 */
	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
		dprintk(SYNX_WARN,
			"[sess :%llu] un-released handle %u\n",
			client->id, curr->key);
		j = kref_read(&curr->refcount);
		/* release pending reference */
		while (j--)
			kref_put(&curr->refcount, synx_util_destroy_handle);
	}

	mutex_destroy(&client->event_q_lock);

	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
		client->id, client->name, client);
	vfree(client);
}

void synx_client_destroy(struct kref *kref)
{
	struct synx_client *client =
		container_of(kref, struct synx_client, refcount);

	hash_del(&client->node);
	INIT_WORK(&client->dispatch, synx_client_cleanup);
	queue_work(synx_dev->wq_cleanup, &client->dispatch);
}

void synx_put_client(struct synx_client *client)
{
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	kref_put(&client->refcount, synx_client_destroy);
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
}

void synx_util_generate_timestamp(char *timestamp, size_t size)
{
	struct timespec64 tv;
	struct tm tm;

	ktime_get_real_ts64(&tv);
	time64_to_tm(tv.tv_sec, 0, &tm);
	snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
		tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
		tm.tm_min, tm.tm_sec);
}

void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
{
	struct error_node *err_node;

	if (!synx_dev->debugfs_root)
		return;

	err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return;

	err_node->client_id = client_id;
	err_node->error_code = err;
	err_node->h_synx = h_synx;
	synx_util_generate_timestamp(err_node->timestamp,
		sizeof(err_node->timestamp));
	mutex_lock(&synx_dev->error_lock);
	list_add(&err_node->node,
		&synx_dev->error_list);
	mutex_unlock(&synx_dev->error_lock);
}

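/* Save external (CSL) fence data keyed by the fence value so the associated
 * synx handle can be looked up later; entries are refcounted and released
 * through synx_util_remove_data().
 */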
int synx_util_save_data(void *fence, u32 flags,
	u32 h_synx)
{
	int rc = SYNX_SUCCESS;
	struct synx_entry_64 *entry = NULL, *curr;
	u64 key;
	u32 tbl = synx_util_map_params_to_type(flags);

	switch (tbl) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		/* ensure fence is not already added to map */
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				rc = -SYNX_ALREADY;
				break;
			}
		}

		if (rc == SYNX_SUCCESS) {
			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
			if (entry) {
				entry->data[0] = h_synx;
				entry->key = key;
				kref_init(&entry->refcount);
				hash_add(synx_dev->native->csl_fence_map,
					&entry->node, entry->key);
				dprintk(SYNX_MEM, "added csl fence %d to map %pK\n",
					entry->key, entry);
			} else {
				rc = -SYNX_NOMEM;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection\n");
		kfree(entry);
		rc = -SYNX_INVALID;
	}
	return rc;
}

struct synx_entry_64 *synx_util_retrieve_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				kref_get(&curr->refcount);
				entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}

	return entry;
}

void synx_util_destroy_data(struct kref *kref)
{
	struct synx_entry_64 *entry =
		container_of(kref, struct synx_entry_64, refcount);

	hash_del(&entry->node);
	dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
		entry->key, entry);
	kfree(entry);
}

void synx_util_remove_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	if (IS_ERR_OR_NULL(fence))
		return;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *((u32 *)fence);
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				entry = curr;
				break;
			}
		}
		if (entry)
			kref_put(&entry->refcount, synx_util_destroy_data);
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}
}

void synx_util_map_import_params_to_create(
	struct synx_import_indv_params *params,
	struct synx_create_params *c_params)
{
	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
		return;

	if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
		c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;

	if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
		c_params->flags |= SYNX_CREATE_LOCAL_FENCE;

	if (params->flags & SYNX_IMPORT_DMA_FENCE)
		c_params->flags |= SYNX_CREATE_DMA_FENCE;
}

u32 synx_util_map_client_id_to_core(
	enum synx_client_id id)
{
	u32 core_id;

	switch (id) {
	case SYNX_CLIENT_NATIVE:
		core_id = SYNX_CORE_APSS; break;
	case SYNX_CLIENT_ICP_CTX0:
		core_id = SYNX_CORE_ICP; break;
	case SYNX_CLIENT_EVA_CTX0:
		core_id = SYNX_CORE_EVA; break;
	case SYNX_CLIENT_VID_CTX0:
		core_id = SYNX_CORE_IRIS; break;
	case SYNX_CLIENT_NSP_CTX0:
		core_id = SYNX_CORE_NSP; break;
	default:
		core_id = SYNX_CORE_MAX;
	}

	return core_id;
}