synx_util.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/slab.h>
  7. #include <linux/random.h>
  8. #include <linux/vmalloc.h>
  9. #include "synx_debugfs.h"
  10. #include "synx_util.h"
  11. extern void synx_external_callback(s32 sync_obj, int status, void *data);
  12. static u32 __fence_state(struct dma_fence *fence, bool locked);
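/*
 * Initialize the coredata backing a new synx handle. A global or local
 * handle is allocated (or an existing global handle is re-referenced),
 * and the object is backed either by the caller-supplied external dma
 * fence (SYNX_CREATE_DMA_FENCE) or by a newly allocated fence whose
 * lock and memory are freed from the fence release path.
 */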
  13. int synx_util_init_coredata(struct synx_coredata *synx_obj,
  14. struct synx_create_params *params,
  15. struct dma_fence_ops *ops,
  16. u64 dma_context)
  17. {
  18. int rc = -SYNX_INVALID;
  19. spinlock_t *fence_lock;
  20. struct dma_fence *fence;
  21. struct synx_fence_entry *entry;
  22. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
  23. IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
  24. return -SYNX_INVALID;
  25. if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
  26. *params->h_synx != 0) {
  27. rc = synx_global_get_ref(
  28. synx_util_global_idx(*params->h_synx));
  29. synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
  30. } else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
  31. rc = synx_alloc_global_handle(params->h_synx);
  32. synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
  33. } else {
  34. rc = synx_alloc_local_handle(params->h_synx);
  35. }
  36. if (rc != SYNX_SUCCESS)
  37. return rc;
  38. synx_obj->map_count = 1;
  39. synx_obj->num_bound_synxs = 0;
  40. synx_obj->type |= params->flags;
  41. kref_init(&synx_obj->refcount);
  42. mutex_init(&synx_obj->obj_lock);
  43. INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
  44. if (params->name)
  45. strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));
  46. if (params->flags & SYNX_CREATE_DMA_FENCE) {
  47. fence = params->fence;
  48. if (IS_ERR_OR_NULL(fence)) {
  49. dprintk(SYNX_ERR, "invalid external fence\n");
50. rc = -SYNX_INVALID;
goto free;
  51. }
  52. dma_fence_get(fence);
  53. synx_obj->fence = fence;
  54. } else {
  55. /*
  56. * lock and fence memory will be released in fence
  57. * release function
  58. */
  59. fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
  60. if (IS_ERR_OR_NULL(fence_lock)) {
  61. rc = -SYNX_NOMEM;
  62. goto free;
  63. }
  64. fence = kzalloc(sizeof(*fence), GFP_KERNEL);
  65. if (IS_ERR_OR_NULL(fence)) {
  66. kfree(fence_lock);
  67. rc = -SYNX_NOMEM;
  68. goto free;
  69. }
  70. spin_lock_init(fence_lock);
  71. dma_fence_init(fence, ops, fence_lock, dma_context, 1);
  72. synx_obj->fence = fence;
  73. synx_util_activate(synx_obj);
  74. dprintk(SYNX_MEM,
  75. "allocated backing fence %pK\n", fence);
  76. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  77. if (IS_ERR_OR_NULL(entry)) {
  78. rc = -SYNX_NOMEM;
  79. goto clean;
  80. }
  81. entry->key = (u64)fence;
  82. if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
  83. entry->g_handle = *params->h_synx;
  84. else
  85. entry->l_handle = *params->h_synx;
  86. rc = synx_util_insert_fence_entry(entry,
  87. params->h_synx,
  88. params->flags & SYNX_CREATE_GLOBAL_FENCE);
  89. BUG_ON(rc != SYNX_SUCCESS);
  90. }
  91. if (rc != SYNX_SUCCESS)
  92. goto clean;
  93. synx_obj->status = synx_util_get_object_status(synx_obj);
  94. return SYNX_SUCCESS;
  95. clean:
  96. dma_fence_put(fence);
  97. free:
  98. if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
  99. synx_global_put_ref(
  100. synx_util_global_idx(*params->h_synx));
  101. else
  102. clear_bit(synx_util_global_idx(*params->h_synx),
  103. synx_dev->native->bitmap);
  104. return rc;
  105. }
  106. int synx_util_add_callback(struct synx_coredata *synx_obj,
  107. u32 h_synx)
  108. {
  109. int rc;
  110. struct synx_signal_cb *signal_cb;
  111. if (IS_ERR_OR_NULL(synx_obj))
  112. return -SYNX_INVALID;
  113. signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
  114. if (IS_ERR_OR_NULL(signal_cb))
  115. return -SYNX_NOMEM;
  116. signal_cb->handle = h_synx;
  117. signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
  118. signal_cb->synx_obj = synx_obj;
  119. /* get reference on synx coredata for signal cb */
  120. synx_util_get_object(synx_obj);
  121. /*
  122. * adding callback enables synx framework to
  123. * get notified on signal from clients using
  124. * native dma fence operations.
  125. */
  126. rc = dma_fence_add_callback(synx_obj->fence,
  127. &signal_cb->fence_cb, synx_fence_callback);
  128. if (rc != 0) {
  129. if (rc == -ENOENT) {
  130. if (synx_util_is_global_object(synx_obj)) {
131. /* propagate status to the global handle, if applicable */
  132. rc = synx_global_update_status(
  133. synx_obj->global_idx,
  134. synx_util_get_object_status(synx_obj));
  135. if (rc != SYNX_SUCCESS)
  136. dprintk(SYNX_ERR,
  137. "status update of %u with fence %pK\n",
  138. synx_obj->global_idx, synx_obj->fence);
  139. } else {
  140. rc = SYNX_SUCCESS;
  141. }
  142. } else {
  143. dprintk(SYNX_ERR,
  144. "error adding callback for %pK err %d\n",
  145. synx_obj->fence, rc);
  146. }
  147. synx_util_put_object(synx_obj);
  148. kfree(signal_cb);
  149. return rc;
  150. }
  151. synx_obj->signal_cb = signal_cb;
  152. dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
  153. signal_cb, synx_obj->fence);
  154. return SYNX_SUCCESS;
  155. }
  156. int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
  157. struct dma_fence **fences,
  158. struct synx_merge_params *params,
  159. u32 num_objs,
  160. u64 dma_context)
  161. {
  162. int rc;
  163. struct dma_fence_array *array;
  164. if (IS_ERR_OR_NULL(synx_obj))
  165. return -SYNX_INVALID;
  166. if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
  167. rc = synx_alloc_global_handle(params->h_merged_obj);
  168. synx_obj->global_idx =
  169. synx_util_global_idx(*params->h_merged_obj);
  170. } else {
  171. rc = synx_alloc_local_handle(params->h_merged_obj);
  172. }
  173. if (rc != SYNX_SUCCESS)
  174. return rc;
  175. array = dma_fence_array_create(num_objs, fences,
  176. dma_context, 1, false);
  177. if (IS_ERR_OR_NULL(array))
  178. return -SYNX_INVALID;
  179. synx_obj->fence = &array->base;
  180. synx_obj->map_count = 1;
  181. synx_obj->type = params->flags;
  182. synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
  183. synx_obj->num_bound_synxs = 0;
  184. kref_init(&synx_obj->refcount);
  185. mutex_init(&synx_obj->obj_lock);
  186. INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
  187. synx_obj->status = synx_util_get_object_status(synx_obj);
  188. synx_util_activate(synx_obj);
  189. return rc;
  190. }
  191. static void synx_util_destroy_coredata(struct kref *kref)
  192. {
  193. int rc;
  194. struct synx_coredata *synx_obj =
  195. container_of(kref, struct synx_coredata, refcount);
  196. if (synx_util_is_global_object(synx_obj)) {
  197. rc = synx_global_clear_subscribed_core(synx_obj->global_idx, SYNX_CORE_APSS);
  198. if (rc)
  199. dprintk(SYNX_ERR, "Failed to clear subscribers");
  200. synx_global_put_ref(synx_obj->global_idx);
  201. }
  202. synx_util_object_destroy(synx_obj);
  203. }
  204. void synx_util_get_object(struct synx_coredata *synx_obj)
  205. {
  206. kref_get(&synx_obj->refcount);
  207. }
  208. void synx_util_put_object(struct synx_coredata *synx_obj)
  209. {
  210. kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
  211. }
  212. int synx_util_cleanup_merged_fence(struct synx_coredata *synx_obj, int status)
  213. {
  214. struct dma_fence_array *array = NULL;
  215. u32 i;
  216. int rc = 0;
  217. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(synx_obj->fence))
  218. return -SYNX_INVALID;
  219. if (dma_fence_is_array(synx_obj->fence)) {
  220. array = to_dma_fence_array(synx_obj->fence);
  221. if (IS_ERR_OR_NULL(array))
  222. return -SYNX_INVALID;
  223. for (i = 0; i < array->num_fences; i++) {
  224. if (kref_read(&array->fences[i]->refcount) == 1 &&
  225. __fence_state(array->fences[i], false) == SYNX_STATE_ACTIVE) {
  226. dma_fence_set_error(array->fences[i],
  227. -SYNX_STATE_SIGNALED_CANCEL);
  228. rc = dma_fence_signal(array->fences[i]);
  229. if (rc)
  230. dprintk(SYNX_ERR,
  231. "signaling child fence %pK failed=%d\n",
  232. array->fences[i], rc);
  233. }
  234. dma_fence_put(array->fences[i]);
  235. }
  236. }
  237. return rc;
  238. }
  239. void synx_util_object_destroy(struct synx_coredata *synx_obj)
  240. {
  241. int rc;
  242. u32 i;
  243. s32 sync_id;
  244. u32 type;
  245. unsigned long flags;
  246. struct synx_cb_data *synx_cb, *synx_cb_temp;
  247. struct synx_bind_desc *bind_desc;
  248. struct bind_operations *bind_ops;
  249. struct synx_external_data *data;
  250. /* clear all the undispatched callbacks */
  251. list_for_each_entry_safe(synx_cb,
  252. synx_cb_temp, &synx_obj->reg_cbs_list, node) {
  253. dprintk(SYNX_ERR,
  254. "dipatching un-released callbacks of session %pK\n",
  255. synx_cb->session);
  256. synx_cb->status = SYNX_STATE_SIGNALED_CANCEL;
  257. list_del_init(&synx_cb->node);
  258. queue_work(synx_dev->wq_cb,
  259. &synx_cb->cb_dispatch);
260. dprintk(SYNX_VERB, "dispatched callback for fence %pK\n", synx_obj->fence);
  261. }
  262. for (i = 0; i < synx_obj->num_bound_synxs; i++) {
  263. bind_desc = &synx_obj->bound_synxs[i];
  264. sync_id = bind_desc->external_desc.id;
  265. type = bind_desc->external_desc.type;
  266. data = bind_desc->external_data;
  267. bind_ops = synx_util_get_bind_ops(type);
  268. if (IS_ERR_OR_NULL(bind_ops)) {
269. dprintk(SYNX_ERR,
270. "bind ops fail id: %d, type: %u\n",
271. sync_id, type);
  272. continue;
  273. }
  274. /* clear the hash table entry */
  275. synx_util_remove_data(&sync_id, type);
  276. rc = bind_ops->deregister_callback(
  277. synx_external_callback, data, sync_id);
  278. if (rc < 0) {
  279. dprintk(SYNX_ERR,
  280. "de-registration fail id: %d, type: %u, err: %d\n",
  281. sync_id, type, rc);
  282. continue;
  283. }
  284. /*
  285. * release the memory allocated for external data.
  286. * It is safe to release this memory
  287. * only if deregistration is successful.
  288. */
  289. kfree(data);
  290. }
  291. mutex_destroy(&synx_obj->obj_lock);
  292. synx_util_release_fence_entry((u64)synx_obj->fence);
293. /* dma fence framework expects fences to be signaled before release,
294. * so signal if the handle is still active and we hold the last
295. * refcount. Synx handles on other cores remain active to carry out
296. * the usual callflow. */
  297. if (!IS_ERR_OR_NULL(synx_obj->fence)) {
  298. spin_lock_irqsave(synx_obj->fence->lock, flags);
  299. if (synx_util_is_merged_object(synx_obj) &&
  300. synx_util_get_object_status_locked(synx_obj) == SYNX_STATE_ACTIVE)
  301. rc = synx_util_cleanup_merged_fence(synx_obj, -SYNX_STATE_SIGNALED_CANCEL);
  302. else if (kref_read(&synx_obj->fence->refcount) == 1 &&
  303. (synx_util_get_object_status_locked(synx_obj) ==
  304. SYNX_STATE_ACTIVE)) {
  305. // set fence error to cancel
  306. dma_fence_set_error(synx_obj->fence,
  307. -SYNX_STATE_SIGNALED_CANCEL);
  308. rc = dma_fence_signal_locked(synx_obj->fence);
  309. }
  310. spin_unlock_irqrestore(synx_obj->fence->lock, flags);
  311. if (rc)
  312. dprintk(SYNX_ERR,
  313. "signaling fence %pK failed=%d\n",
  314. synx_obj->fence, rc);
  315. }
316. dma_fence_put(synx_obj->fence);
317. dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
318. kfree(synx_obj);
  319. }
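/*
 * Reserve a free slot in a handle bitmap. find_first_zero_bit() is not
 * atomic with respect to concurrent allocators, so the slot is claimed
 * with test_and_set_bit() and the scan is retried if another caller
 * raced in and took the same bit. A minimal caller sketch (illustrative
 * only, mirroring synx_util_alloc_cb_entry below):
 *   idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
 *   if (idx >= SYNX_MAX_OBJS)
 *       return -SYNX_NOMEM;
 */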
  320. long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
  321. {
  322. bool bit;
  323. long idx;
  324. do {
  325. idx = find_first_zero_bit(bitmap, size);
  326. if (idx >= size)
  327. break;
  328. bit = test_and_set_bit(idx, bitmap);
  329. } while (bit);
  330. return idx;
  331. }
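/*
 * Pack a handle from its table index, owning core and scope. As implied
 * by the shifts below, the index occupies the low SYNX_HANDLE_INDEX_BITS
 * bits, the core id sits above it, and the next bit marks a global
 * handle (illustrative layout; the actual widths come from the headers):
 *   handle = [global bit][core_id][idx]
 */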
  332. u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
  333. {
  334. u32 handle = 0;
  335. if (idx >= SYNX_MAX_OBJS)
  336. return 0;
  337. if (global_idx) {
  338. handle = 1;
  339. handle <<= SYNX_HANDLE_CORE_BITS;
  340. }
  341. handle |= core_id;
  342. handle <<= SYNX_HANDLE_INDEX_BITS;
  343. handle |= idx;
  344. return handle;
  345. }
  346. int synx_alloc_global_handle(u32 *new_synx)
  347. {
  348. int rc;
  349. u32 idx;
  350. rc = synx_global_alloc_index(&idx);
  351. if (rc != SYNX_SUCCESS)
  352. return rc;
  353. *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
  354. dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
  355. *new_synx, *new_synx);
  356. rc = synx_global_init_coredata(*new_synx);
  357. return rc;
  358. }
  359. int synx_alloc_local_handle(u32 *new_synx)
  360. {
  361. u32 idx;
  362. idx = synx_util_get_free_handle(synx_dev->native->bitmap,
  363. SYNX_MAX_OBJS);
  364. if (idx >= SYNX_MAX_OBJS)
  365. return -SYNX_NOMEM;
  366. *new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
  367. dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
  368. *new_synx, *new_synx);
  369. return SYNX_SUCCESS;
  370. }
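/*
 * Publish a handle in the client's handle_map. If the key is already
 * mapped to the same coredata (e.g. a repeated import), the existing
 * entry's refcount and rel_count are bumped instead of adding a
 * duplicate node; the same key mapped to a different coredata is
 * treated as inconsistent map data and rejected.
 */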
  371. int synx_util_init_handle(struct synx_client *client,
  372. struct synx_coredata *synx_obj, u32 *new_h_synx,
  373. void *map_entry)
  374. {
  375. int rc = SYNX_SUCCESS;
  376. bool found = false;
  377. struct synx_handle_coredata *synx_data, *curr;
  378. if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
  379. IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
  380. return -SYNX_INVALID;
  381. synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
  382. if (IS_ERR_OR_NULL(synx_data))
  383. return -SYNX_NOMEM;
  384. synx_data->client = client;
  385. synx_data->synx_obj = synx_obj;
  386. synx_data->key = *new_h_synx;
  387. synx_data->map_entry = map_entry;
  388. kref_init(&synx_data->refcount);
  389. synx_data->rel_count = 1;
  390. spin_lock_bh(&client->handle_map_lock);
  391. hash_for_each_possible(client->handle_map,
  392. curr, node, *new_h_synx) {
  393. if (curr->key == *new_h_synx) {
  394. if (curr->synx_obj != synx_obj) {
  395. rc = -SYNX_INVALID;
  396. dprintk(SYNX_ERR,
  397. "inconsistent data in handle map\n");
  398. } else {
  399. kref_get(&curr->refcount);
  400. curr->rel_count++;
  401. }
  402. found = true;
  403. break;
  404. }
  405. }
  406. if (unlikely(found))
  407. kfree(synx_data);
  408. else
  409. hash_add(client->handle_map,
  410. &synx_data->node, *new_h_synx);
  411. spin_unlock_bh(&client->handle_map_lock);
  412. return rc;
  413. }
  414. int synx_util_activate(struct synx_coredata *synx_obj)
  415. {
  416. if (IS_ERR_OR_NULL(synx_obj))
  417. return -SYNX_INVALID;
  418. /* move synx to ACTIVE state and register cb for merged object */
  419. dma_fence_enable_sw_signaling(synx_obj->fence);
  420. return 0;
  421. }
  422. static u32 synx_util_get_references(struct synx_coredata *synx_obj)
  423. {
  424. u32 count = 0;
  425. u32 i = 0;
  426. struct dma_fence_array *array = NULL;
  427. /* obtain dma fence reference */
  428. if (dma_fence_is_array(synx_obj->fence)) {
  429. array = to_dma_fence_array(synx_obj->fence);
  430. if (IS_ERR_OR_NULL(array))
  431. return 0;
  432. for (i = 0; i < array->num_fences; i++)
  433. dma_fence_get(array->fences[i]);
  434. count = array->num_fences;
  435. } else {
  436. dma_fence_get(synx_obj->fence);
  437. count = 1;
  438. }
  439. return count;
  440. }
  441. static void synx_util_put_references(struct synx_coredata *synx_obj)
  442. {
  443. u32 i = 0;
  444. struct dma_fence_array *array = NULL;
  445. if (dma_fence_is_array(synx_obj->fence)) {
  446. array = to_dma_fence_array(synx_obj->fence);
  447. if (IS_ERR_OR_NULL(array))
  448. return;
  449. for (i = 0; i < array->num_fences; i++)
  450. dma_fence_put(array->fences[i]);
  451. } else {
  452. dma_fence_put(synx_obj->fence);
  453. }
  454. }
  455. static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
  456. struct dma_fence **fences,
  457. u32 idx)
  458. {
  459. struct dma_fence_array *array = NULL;
  460. u32 i = 0;
  461. if (dma_fence_is_array(synx_obj->fence)) {
  462. array = to_dma_fence_array(synx_obj->fence);
  463. if (IS_ERR_OR_NULL(array))
  464. return 0;
  465. for (i = 0; i < array->num_fences; i++)
  466. fences[idx+i] = array->fences[i];
  467. return array->num_fences;
  468. }
  469. fences[idx] = synx_obj->fence;
  470. return 1;
  471. }
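/*
 * Compact the fence array in place, keeping only the first occurrence
 * of each fence. The dma_fence reference taken for a duplicate during
 * merge validation is dropped here, and the number of unique fences is
 * returned.
 */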
  472. static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
  473. {
  474. int i, j;
  475. u32 wr_idx = 1;
  476. if (IS_ERR_OR_NULL(arr)) {
  477. dprintk(SYNX_ERR, "invalid input array\n");
  478. return 0;
  479. }
  480. for (i = 1; i < num; i++) {
  481. for (j = 0; j < wr_idx ; j++) {
  482. if (arr[i] == arr[j]) {
  483. /* release reference obtained for duplicate */
  484. dprintk(SYNX_DBG,
  485. "releasing duplicate reference\n");
  486. dma_fence_put(arr[i]);
  487. break;
  488. }
  489. }
  490. if (j == wr_idx)
  491. arr[wr_idx++] = arr[i];
  492. }
  493. return wr_idx;
  494. }
  495. s32 synx_util_merge_error(struct synx_client *client,
  496. u32 *h_synxs,
  497. u32 num_objs)
  498. {
  499. u32 i = 0;
  500. struct synx_handle_coredata *synx_data;
  501. struct synx_coredata *synx_obj;
  502. if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
  503. return -SYNX_INVALID;
  504. for (i = 0; i < num_objs; i++) {
  505. synx_data = synx_util_acquire_handle(client, h_synxs[i]);
  506. synx_obj = synx_util_obtain_object(synx_data);
  507. if (IS_ERR_OR_NULL(synx_obj) ||
  508. IS_ERR_OR_NULL(synx_obj->fence)) {
  509. dprintk(SYNX_ERR,
  510. "[sess :%llu] invalid handle %d in cleanup\n",
  511. client->id, h_synxs[i]);
  512. continue;
  513. }
514. /* release all references obtained during merge validation */
  515. synx_util_put_references(synx_obj);
  516. synx_util_release_handle(synx_data);
  517. }
  518. return 0;
  519. }
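/*
 * Build the flat fence list for a merge: acquire every input handle,
 * count the fences each contributes (child fences for already-merged
 * objects), collect them into a single array and strip duplicates. On
 * success *fence_list is owned and later freed by the caller; on
 * failure all references taken so far are dropped.
 */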
  520. int synx_util_validate_merge(struct synx_client *client,
  521. u32 *h_synxs,
  522. u32 num_objs,
  523. struct dma_fence ***fence_list,
  524. u32 *fence_cnt)
  525. {
  526. u32 count = 0;
  527. u32 i = 0;
  528. struct synx_handle_coredata **synx_datas;
  529. struct synx_coredata **synx_objs;
  530. struct dma_fence **fences = NULL;
  531. if (num_objs <= 1) {
  532. dprintk(SYNX_ERR, "single handle merge is not allowed\n");
  533. return -SYNX_INVALID;
  534. }
  535. synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
  536. if (IS_ERR_OR_NULL(synx_datas))
  537. return -SYNX_NOMEM;
  538. synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
  539. if (IS_ERR_OR_NULL(synx_objs)) {
  540. kfree(synx_datas);
  541. return -SYNX_NOMEM;
  542. }
  543. for (i = 0; i < num_objs; i++) {
  544. synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
  545. synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
  546. if (IS_ERR_OR_NULL(synx_objs[i]) ||
  547. IS_ERR_OR_NULL(synx_objs[i]->fence)) {
  548. dprintk(SYNX_ERR,
  549. "[sess :%llu] invalid handle %d in merge list\n",
  550. client->id, h_synxs[i]);
  551. *fence_cnt = i;
  552. goto error;
  553. }
  554. count += synx_util_get_references(synx_objs[i]);
  555. }
  556. fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
  557. if (IS_ERR_OR_NULL(fences)) {
  558. *fence_cnt = num_objs;
  559. goto error;
  560. }
  561. /* memory will be released later in the invoking function */
  562. *fence_list = fences;
  563. count = 0;
  564. for (i = 0; i < num_objs; i++) {
  565. count += synx_util_add_fence(synx_objs[i], fences, count);
  566. /* release the reference obtained earlier in the function */
  567. synx_util_release_handle(synx_datas[i]);
  568. }
  569. *fence_cnt = synx_util_remove_duplicates(fences, count);
  570. kfree(synx_objs);
  571. kfree(synx_datas);
  572. return 0;
  573. error:
574. /* release the references obtained earlier in the function */
  575. for (i = 0; i < *fence_cnt; i++) {
  576. synx_util_put_references(synx_objs[i]);
  577. synx_util_release_handle(synx_datas[i]);
  578. }
  579. *fence_cnt = 0;
  580. kfree(synx_objs);
  581. kfree(synx_datas);
  582. return -SYNX_INVALID;
  583. }
  584. static u32 __fence_state(struct dma_fence *fence, bool locked)
  585. {
  586. s32 status;
  587. u32 state = SYNX_STATE_INVALID;
  588. if (IS_ERR_OR_NULL(fence)) {
  589. dprintk(SYNX_ERR, "invalid fence\n");
  590. return SYNX_STATE_INVALID;
  591. }
  592. if (locked)
  593. status = dma_fence_get_status_locked(fence);
  594. else
  595. status = dma_fence_get_status(fence);
  596. /* convert fence status to synx state */
  597. switch (status) {
  598. case 0:
  599. state = SYNX_STATE_ACTIVE;
  600. break;
  601. case 1:
  602. state = SYNX_STATE_SIGNALED_SUCCESS;
  603. break;
  604. case -SYNX_STATE_SIGNALED_CANCEL:
  605. state = SYNX_STATE_SIGNALED_CANCEL;
  606. break;
  607. case -SYNX_STATE_SIGNALED_EXTERNAL:
  608. state = SYNX_STATE_SIGNALED_EXTERNAL;
  609. break;
  610. case -SYNX_STATE_SIGNALED_ERROR:
  611. state = SYNX_STATE_SIGNALED_ERROR;
  612. break;
  613. default:
  614. state = (u32)(-status);
  615. }
  616. return state;
  617. }
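/*
 * Derive the state of a merged (fence array) object from its children:
 * the group is ACTIVE while any child is still active; otherwise it
 * reports the first error state encountered or, if no child has
 * errored, the state of the last child evaluated.
 */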
  618. static u32 __fence_group_state(struct dma_fence *fence, bool locked)
  619. {
  620. u32 i = 0;
  621. u32 state = SYNX_STATE_INVALID, parent_state = SYNX_STATE_INVALID;
  622. struct dma_fence_array *array = NULL;
  623. u32 intr, actv_cnt, sig_cnt, err_cnt;
  624. if (IS_ERR_OR_NULL(fence)) {
  625. dprintk(SYNX_ERR, "invalid fence\n");
  626. return SYNX_STATE_INVALID;
  627. }
  628. actv_cnt = sig_cnt = err_cnt = 0;
  629. array = to_dma_fence_array(fence);
  630. if (IS_ERR_OR_NULL(array))
  631. return SYNX_STATE_INVALID;
  632. for (i = 0; i < array->num_fences; i++) {
  633. intr = __fence_state(array->fences[i], locked);
  634. if (err_cnt == 0)
  635. parent_state = intr;
  636. switch (intr) {
  637. case SYNX_STATE_ACTIVE:
  638. actv_cnt++;
  639. break;
  640. case SYNX_STATE_SIGNALED_SUCCESS:
  641. sig_cnt++;
  642. break;
  643. default:
  644. intr > SYNX_STATE_SIGNALED_MAX ? sig_cnt++ : err_cnt++;
  645. }
  646. }
  647. dprintk(SYNX_DBG,
  648. "group cnt stats act:%u, sig: %u, err: %u\n",
  649. actv_cnt, sig_cnt, err_cnt);
  650. if (actv_cnt)
  651. state = SYNX_STATE_ACTIVE;
  652. else
  653. state = parent_state;
  654. return state;
  655. }
  656. /*
  657. * WARN: Should not hold the fence spinlock when invoking
658. * this function. Use synx_util_get_object_status_locked instead.
  659. */
  660. u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
  661. {
  662. u32 state;
  663. if (IS_ERR_OR_NULL(synx_obj))
  664. return SYNX_STATE_INVALID;
  665. if (synx_util_is_merged_object(synx_obj))
  666. state = __fence_group_state(synx_obj->fence, false);
  667. else
  668. state = __fence_state(synx_obj->fence, false);
  669. return state;
  670. }
671. /* use this for status check when already holding the fence spinlock */
  672. u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
  673. {
  674. u32 state;
  675. if (IS_ERR_OR_NULL(synx_obj))
  676. return SYNX_STATE_INVALID;
  677. if (synx_util_is_merged_object(synx_obj))
  678. state = __fence_group_state(synx_obj->fence, true);
  679. else
  680. state = __fence_state(synx_obj->fence, true);
  681. return state;
  682. }
  683. struct synx_handle_coredata *synx_util_acquire_handle(
  684. struct synx_client *client, u32 h_synx)
  685. {
  686. struct synx_handle_coredata *synx_data = NULL;
  687. struct synx_handle_coredata *synx_handle =
  688. ERR_PTR(-SYNX_NOENT);
  689. if (IS_ERR_OR_NULL(client))
  690. return ERR_PTR(-SYNX_INVALID);
  691. spin_lock_bh(&client->handle_map_lock);
  692. hash_for_each_possible(client->handle_map,
  693. synx_data, node, h_synx) {
  694. if (synx_data->key == h_synx &&
  695. synx_data->rel_count != 0) {
  696. kref_get(&synx_data->refcount);
  697. synx_handle = synx_data;
  698. break;
  699. }
  700. }
  701. spin_unlock_bh(&client->handle_map_lock);
  702. return synx_handle;
  703. }
  704. struct synx_map_entry *synx_util_insert_to_map(
  705. struct synx_coredata *synx_obj,
  706. u32 h_synx, u32 flags)
  707. {
  708. struct synx_map_entry *map_entry;
  709. map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
  710. if (IS_ERR_OR_NULL(map_entry))
  711. return ERR_PTR(-SYNX_NOMEM);
  712. kref_init(&map_entry->refcount);
  713. map_entry->synx_obj = synx_obj;
  714. map_entry->flags = flags;
  715. map_entry->key = h_synx;
  716. if (synx_util_is_global_handle(h_synx)) {
  717. spin_lock_bh(&synx_dev->native->global_map_lock);
  718. hash_add(synx_dev->native->global_map,
  719. &map_entry->node, h_synx);
  720. spin_unlock_bh(&synx_dev->native->global_map_lock);
  721. dprintk(SYNX_MEM,
  722. "added handle %u to global map %pK\n",
  723. h_synx, map_entry);
  724. } else {
  725. spin_lock_bh(&synx_dev->native->local_map_lock);
  726. hash_add(synx_dev->native->local_map,
  727. &map_entry->node, h_synx);
  728. spin_unlock_bh(&synx_dev->native->local_map_lock);
  729. dprintk(SYNX_MEM,
  730. "added handle %u to local map %pK\n",
  731. h_synx, map_entry);
  732. }
  733. return map_entry;
  734. }
  735. struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
  736. {
  737. struct synx_map_entry *curr;
  738. struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);
  739. if (h_synx == 0)
  740. return ERR_PTR(-SYNX_INVALID);
  741. if (synx_util_is_global_handle(h_synx)) {
  742. spin_lock_bh(&synx_dev->native->global_map_lock);
  743. hash_for_each_possible(synx_dev->native->global_map,
  744. curr, node, h_synx) {
  745. if (curr->key == h_synx) {
  746. kref_get(&curr->refcount);
  747. map_entry = curr;
  748. break;
  749. }
  750. }
  751. spin_unlock_bh(&synx_dev->native->global_map_lock);
  752. } else {
  753. spin_lock_bh(&synx_dev->native->local_map_lock);
  754. hash_for_each_possible(synx_dev->native->local_map,
  755. curr, node, h_synx) {
  756. if (curr->key == h_synx) {
  757. kref_get(&curr->refcount);
  758. map_entry = curr;
  759. break;
  760. }
  761. }
  762. spin_unlock_bh(&synx_dev->native->local_map_lock);
  763. }
  764. /* should we allocate if entry not found? */
  765. return map_entry;
  766. }
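/*
 * Drop one map reference on the coredata and, when the last map entry
 * is gone while the fence is still active, detach the pending signal
 * cb: a handle with a global index either signals the local fence with
 * the already-available global status or hands the cb over to the
 * global handle so cross-core waiters are still notified; a purely
 * local handle just removes and frees the dma fence callback.
 */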
  767. static void synx_util_cleanup_fence(
  768. struct synx_coredata *synx_obj)
  769. {
  770. struct synx_signal_cb *signal_cb;
  771. unsigned long flags;
  772. u32 g_status;
  773. u32 f_status;
  774. u32 h_synx = 0;
  775. mutex_lock(&synx_obj->obj_lock);
  776. synx_obj->map_count--;
  777. signal_cb = synx_obj->signal_cb;
  778. f_status = synx_util_get_object_status(synx_obj);
  779. dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
  780. f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
  781. if (synx_obj->map_count == 0 &&
  782. (signal_cb != NULL) &&
  783. (synx_obj->global_idx != 0) &&
  784. (f_status == SYNX_STATE_ACTIVE)) {
785. /*
786. * no more clients on the local core are
787. * interested in notification on this handle.
788. * remove the reference held by the callback
789. * on the synx coredata and, if the fence is
790. * still un-signaled, update the cb with the
791. * global handle idx so any cross-core clients
792. * waiting on the handle are still notified.
793. */
  794. g_status = synx_global_get_status(synx_obj->global_idx);
  795. if (g_status > SYNX_STATE_ACTIVE) {
  796. dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
  797. synx_obj->fence, g_status);
  798. synx_native_signal_fence(synx_obj, g_status);
  799. } else {
  800. spin_lock_irqsave(synx_obj->fence->lock, flags);
  801. if (synx_util_get_object_status_locked(synx_obj) ==
  802. SYNX_STATE_ACTIVE) {
  803. signal_cb->synx_obj = NULL;
  804. synx_global_fetch_handle_details(synx_obj->global_idx, &h_synx);
  805. signal_cb->handle = h_synx;
  806. synx_obj->signal_cb = NULL;
  807. /*
  808. * release reference held by signal cb and
  809. * get reference on global index instead.
  810. */
  811. synx_util_put_object(synx_obj);
  812. synx_global_get_ref(synx_obj->global_idx);
  813. }
  814. spin_unlock_irqrestore(synx_obj->fence->lock, flags);
  815. }
  816. } else if (synx_obj->map_count == 0 && signal_cb &&
  817. (f_status == SYNX_STATE_ACTIVE)) {
  818. if (dma_fence_remove_callback(synx_obj->fence,
  819. &signal_cb->fence_cb)) {
820. dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
821. signal_cb);
822. kfree(signal_cb);
823. synx_obj->signal_cb = NULL;
824. /*
825. * release the reference held by
826. * the signal cb.
827. */
828. synx_util_put_object(synx_obj);
  829. }
  830. }
  831. mutex_unlock(&synx_obj->obj_lock);
  832. }
  833. static void synx_util_destroy_map_entry_worker(
  834. struct work_struct *dispatch)
  835. {
  836. struct synx_map_entry *map_entry =
  837. container_of(dispatch, struct synx_map_entry, dispatch);
  838. struct synx_coredata *synx_obj;
  839. synx_obj = map_entry->synx_obj;
  840. if (!IS_ERR_OR_NULL(synx_obj)) {
  841. synx_util_cleanup_fence(synx_obj);
  842. /* release reference held by map entry */
  843. synx_util_put_object(synx_obj);
  844. }
  845. if (!synx_util_is_global_handle(map_entry->key))
  846. clear_bit(synx_util_global_idx(map_entry->key),
  847. synx_dev->native->bitmap);
  848. dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
  849. map_entry->key, map_entry);
  850. kfree(map_entry);
  851. }
  852. static void synx_util_destroy_map_entry(struct kref *kref)
  853. {
  854. struct synx_map_entry *map_entry =
  855. container_of(kref, struct synx_map_entry, refcount);
  856. hash_del(&map_entry->node);
  857. dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
  858. map_entry->key, map_entry);
  859. INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
  860. queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
  861. }
  862. void synx_util_release_map_entry(struct synx_map_entry *map_entry)
  863. {
  864. spinlock_t *lock;
  865. if (IS_ERR_OR_NULL(map_entry))
  866. return;
  867. if (synx_util_is_global_handle(map_entry->key))
  868. lock = &synx_dev->native->global_map_lock;
  869. else
  870. lock = &synx_dev->native->local_map_lock;
  871. spin_lock_bh(lock);
  872. kref_put(&map_entry->refcount,
  873. synx_util_destroy_map_entry);
  874. spin_unlock_bh(lock);
  875. }
  876. static void synx_util_destroy_handle_worker(
  877. struct work_struct *dispatch)
  878. {
  879. struct synx_handle_coredata *synx_data =
  880. container_of(dispatch, struct synx_handle_coredata,
  881. dispatch);
  882. synx_util_release_map_entry(synx_data->map_entry);
  883. dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
  884. synx_data->key, synx_data);
  885. kfree(synx_data);
  886. }
  887. static void synx_util_destroy_handle(struct kref *kref)
  888. {
  889. struct synx_handle_coredata *synx_data =
  890. container_of(kref, struct synx_handle_coredata,
  891. refcount);
  892. hash_del(&synx_data->node);
  893. dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
  894. synx_data->client->id, synx_data->key, synx_data);
  895. INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
  896. queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
  897. }
  898. void synx_util_release_handle(struct synx_handle_coredata *synx_data)
  899. {
  900. struct synx_client *client;
  901. if (IS_ERR_OR_NULL(synx_data))
  902. return;
  903. client = synx_data->client;
  904. if (IS_ERR_OR_NULL(client))
  905. return;
  906. spin_lock_bh(&client->handle_map_lock);
  907. kref_put(&synx_data->refcount,
  908. synx_util_destroy_handle);
  909. spin_unlock_bh(&client->handle_map_lock);
  910. }
  911. struct bind_operations *synx_util_get_bind_ops(u32 type)
  912. {
  913. struct synx_registered_ops *client_ops;
  914. if (!synx_util_is_valid_bind_type(type))
  915. return NULL;
  916. mutex_lock(&synx_dev->vtbl_lock);
  917. client_ops = &synx_dev->bind_vtbl[type];
  918. if (!client_ops->valid) {
  919. mutex_unlock(&synx_dev->vtbl_lock);
  920. return NULL;
  921. }
  922. mutex_unlock(&synx_dev->vtbl_lock);
  923. return &client_ops->ops;
  924. }
  925. int synx_util_alloc_cb_entry(struct synx_client *client,
  926. struct synx_kernel_payload *data,
  927. u32 *cb_idx)
  928. {
  929. long idx;
  930. struct synx_client_cb *cb;
  931. if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
  932. IS_ERR_OR_NULL(cb_idx))
  933. return -SYNX_INVALID;
  934. idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
  935. if (idx >= SYNX_MAX_OBJS) {
  936. dprintk(SYNX_ERR,
  937. "[sess :%llu] free cb index not available\n",
  938. client->id);
  939. return -SYNX_NOMEM;
  940. }
  941. cb = &client->cb_table[idx];
  942. memset(cb, 0, sizeof(*cb));
  943. cb->is_valid = true;
  944. cb->client = client;
  945. cb->idx = idx;
  946. memcpy(&cb->kernel_cb, data,
  947. sizeof(cb->kernel_cb));
  948. *cb_idx = idx;
  949. dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
  950. client->id, *cb_idx);
  951. return 0;
  952. }
  953. int synx_util_clear_cb_entry(struct synx_client *client,
  954. struct synx_client_cb *cb)
  955. {
  956. int rc = 0;
  957. u32 idx;
  958. if (IS_ERR_OR_NULL(cb))
  959. return -SYNX_INVALID;
  960. idx = cb->idx;
  961. memset(cb, 0, sizeof(*cb));
  962. if (idx && idx < SYNX_MAX_OBJS) {
  963. clear_bit(idx, client->cb_bitmap);
  964. } else {
  965. dprintk(SYNX_ERR, "invalid index\n");
  966. rc = -SYNX_INVALID;
  967. }
  968. return rc;
  969. }
  970. void synx_util_default_user_callback(u32 h_synx,
  971. int status, void *data)
  972. {
  973. struct synx_client_cb *cb = data;
  974. struct synx_client *client = NULL;
  975. if (cb && cb->client) {
  976. client = cb->client;
  977. dprintk(SYNX_VERB,
  978. "[sess :%llu] user cb queued for handle %d\n",
  979. client->id, h_synx);
  980. cb->kernel_cb.status = status;
  981. mutex_lock(&client->event_q_lock);
  982. list_add_tail(&cb->node, &client->event_q);
  983. mutex_unlock(&client->event_q_lock);
  984. wake_up_all(&client->event_wq);
  985. } else {
  986. dprintk(SYNX_ERR, "invalid params\n");
  987. }
  988. }
  989. void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
  990. {
  991. struct synx_cb_data *synx_cb, *synx_cb_temp;
  992. if (IS_ERR_OR_NULL(synx_obj)) {
  993. dprintk(SYNX_ERR, "invalid arguments\n");
  994. return;
  995. }
  996. list_for_each_entry_safe(synx_cb,
  997. synx_cb_temp, &synx_obj->reg_cbs_list, node) {
  998. synx_cb->status = status;
  999. list_del_init(&synx_cb->node);
  1000. queue_work(synx_dev->wq_cb,
  1001. &synx_cb->cb_dispatch);
  1002. dprintk(SYNX_VERB, "dispatched callback\n");
  1003. }
  1004. }
  1005. void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
  1006. {
  1007. struct synx_cb_data *synx_cb =
  1008. container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
  1009. struct synx_client *client;
  1010. struct synx_client_cb *cb;
  1011. struct synx_kernel_payload payload;
  1012. u32 status;
  1013. client = synx_get_client(synx_cb->session);
  1014. if (IS_ERR_OR_NULL(client)) {
  1015. dprintk(SYNX_ERR,
  1016. "invalid session data %pK in cb payload\n",
  1017. synx_cb->session);
  1018. goto free;
  1019. }
  1020. if (synx_cb->idx == 0 ||
  1021. synx_cb->idx >= SYNX_MAX_OBJS) {
  1022. dprintk(SYNX_ERR,
  1023. "[sess :%llu] invalid cb index %u\n",
  1024. client->id, synx_cb->idx);
  1025. goto fail;
  1026. }
  1027. status = synx_cb->status;
  1028. cb = &client->cb_table[synx_cb->idx];
  1029. if (!cb->is_valid) {
  1030. dprintk(SYNX_ERR, "invalid cb payload\n");
  1031. goto fail;
  1032. }
  1033. memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
  1034. payload.status = status;
  1035. if (payload.cb_func == synx_util_default_user_callback) {
  1036. /*
  1037. * need to send client cb data for default
  1038. * user cb (userspace cb)
  1039. */
  1040. payload.data = cb;
  1041. } else {
  1042. /*
  1043. * clear the cb entry. userspace cb entry
  1044. * will be cleared after data read by the
  1045. * polling thread or when client is destroyed
  1046. */
  1047. if (synx_util_clear_cb_entry(client, cb))
  1048. dprintk(SYNX_ERR,
  1049. "[sess :%llu] error clearing cb entry\n",
  1050. client->id);
  1051. }
  1052. dprintk(SYNX_DBG,
  1053. "callback dispatched for handle %u, status %u, data %pK\n",
  1054. payload.h_synx, payload.status, payload.data);
  1055. /* dispatch kernel callback */
  1056. payload.cb_func(payload.h_synx,
  1057. payload.status, payload.data);
  1058. fail:
  1059. synx_put_client(client);
  1060. free:
  1061. kfree(synx_cb);
  1062. }
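/*
 * Resolve the child coredata objects of a merged handle: walk the
 * backing dma_fence_array, look up each child fence in the fence map
 * to recover its handle, and fetch the corresponding map entry. The
 * array returned through child_synx_obj is allocated here and owned by
 * the caller.
 */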
  1063. int synx_get_child_coredata(struct synx_coredata *synx_obj, struct synx_coredata ***child_synx_obj, int *num_fences)
  1064. {
  1065. int rc = SYNX_SUCCESS;
  1066. int i = 0, handle_count = 0;
  1067. u32 h_child = 0;
  1068. struct dma_fence_array *array = NULL;
  1069. struct synx_coredata **synx_datas = NULL;
  1070. struct synx_map_entry *fence_entry = NULL;
  1071. if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(num_fences))
  1072. return -SYNX_INVALID;
  1073. if (dma_fence_is_array(synx_obj->fence)) {
  1074. array = to_dma_fence_array(synx_obj->fence);
  1075. if (IS_ERR_OR_NULL(array))
  1076. return -SYNX_INVALID;
  1077. synx_datas = kcalloc(array->num_fences, sizeof(*synx_datas), GFP_KERNEL);
  1078. if (IS_ERR_OR_NULL(synx_datas))
  1079. return -SYNX_NOMEM;
  1080. for (i = 0; i < array->num_fences; i++) {
  1081. h_child = synx_util_get_fence_entry((u64)array->fences[i], 1);
  1082. fence_entry = synx_util_get_map_entry(h_child);
  1083. if (IS_ERR_OR_NULL(fence_entry) || IS_ERR_OR_NULL(fence_entry->synx_obj))
  1084. {
  1085. dprintk(SYNX_ERR, "Invalid handle access %u", h_child);
  1086. rc = -SYNX_NOENT;
  1087. goto fail;
  1088. }
  1089. synx_datas[handle_count++] = fence_entry->synx_obj;
  1090. synx_util_release_map_entry(fence_entry);
  1091. }
  1092. }
  1093. *child_synx_obj = synx_datas;
  1094. *num_fences = handle_count;
  1095. return rc;
  1096. fail:
  1097. kfree(synx_datas);
  1098. return rc;
  1099. }
  1100. u32 synx_util_get_fence_entry(u64 key, u32 global)
  1101. {
  1102. u32 h_synx = 0;
  1103. struct synx_fence_entry *curr;
  1104. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1105. hash_for_each_possible(synx_dev->native->fence_map,
  1106. curr, node, key) {
  1107. if (curr->key == key) {
  1108. if (global)
  1109. h_synx = curr->g_handle;
  1110. /* return local handle if global not available */
  1111. if (h_synx == 0)
  1112. h_synx = curr->l_handle;
  1113. break;
  1114. }
  1115. }
  1116. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1117. return h_synx;
  1118. }
  1119. void synx_util_release_fence_entry(u64 key)
  1120. {
  1121. struct synx_fence_entry *entry = NULL, *curr;
  1122. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1123. hash_for_each_possible(synx_dev->native->fence_map,
  1124. curr, node, key) {
  1125. if (curr->key == key) {
  1126. entry = curr;
  1127. break;
  1128. }
  1129. }
  1130. if (entry) {
  1131. hash_del(&entry->node);
  1132. dprintk(SYNX_MEM,
  1133. "released fence entry %pK for fence %pK\n",
  1134. entry, (void *)key);
  1135. kfree(entry);
  1136. }
  1137. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1138. }
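/*
 * Record the fence-to-handle mapping. If an import of the same fence
 * from another process raced ahead and already inserted an entry, the
 * existing global (or local) handle is returned through h_synx and
 * -SYNX_ALREADY is reported so the caller reuses it instead of the new
 * entry.
 */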
  1139. int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
  1140. u32 *h_synx, u32 global)
  1141. {
  1142. int rc = SYNX_SUCCESS;
  1143. struct synx_fence_entry *curr;
  1144. if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
  1145. return -SYNX_INVALID;
  1146. spin_lock_bh(&synx_dev->native->fence_map_lock);
  1147. hash_for_each_possible(synx_dev->native->fence_map,
  1148. curr, node, entry->key) {
  1149. /* raced with import from another process on same fence */
  1150. if (curr->key == entry->key) {
  1151. if (global)
  1152. *h_synx = curr->g_handle;
  1153. if (*h_synx == 0 || !global)
  1154. *h_synx = curr->l_handle;
  1155. rc = -SYNX_ALREADY;
  1156. break;
  1157. }
  1158. }
1159. /* add entry only if it is not present in the map */
  1160. if (rc == SYNX_SUCCESS) {
  1161. hash_add(synx_dev->native->fence_map,
  1162. &entry->node, entry->key);
  1163. dprintk(SYNX_MEM,
  1164. "added fence entry %pK for fence %pK\n",
  1165. entry, (void *)entry->key);
  1166. }
  1167. spin_unlock_bh(&synx_dev->native->fence_map_lock);
  1168. return rc;
  1169. }
  1170. struct synx_client *synx_get_client(struct synx_session *session)
  1171. {
  1172. struct synx_client *client = NULL;
  1173. struct synx_client *curr;
  1174. if (IS_ERR_OR_NULL(session))
  1175. return ERR_PTR(-SYNX_INVALID);
  1176. spin_lock_bh(&synx_dev->native->metadata_map_lock);
  1177. hash_for_each_possible(synx_dev->native->client_metadata_map,
  1178. curr, node, (u64)session) {
  1179. if (curr == (struct synx_client *)session) {
  1180. if (curr->active) {
  1181. kref_get(&curr->refcount);
  1182. client = curr;
  1183. }
  1184. break;
  1185. }
  1186. }
  1187. spin_unlock_bh(&synx_dev->native->metadata_map_lock);
  1188. return client;
  1189. }
  1190. static void synx_client_cleanup(struct work_struct *dispatch)
  1191. {
  1192. int i, j;
  1193. struct synx_client *client =
  1194. container_of(dispatch, struct synx_client, dispatch);
  1195. struct synx_handle_coredata *curr;
  1196. struct hlist_node *tmp;
  1197. dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
  1198. client->id, client->name);
  1199. /*
  1200. * go over all the remaining synx obj handles
  1201. * un-released from this session and remove them.
  1202. */
  1203. hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
  1204. dprintk(SYNX_WARN,
  1205. "[sess :%llu] un-released handle %u\n",
  1206. client->id, curr->key);
  1207. j = kref_read(&curr->refcount);
  1208. /* release pending reference */
  1209. while (j--)
  1210. kref_put(&curr->refcount, synx_util_destroy_handle);
  1211. }
  1212. mutex_destroy(&client->event_q_lock);
  1213. dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
  1214. client->id, client->name, client);
  1215. vfree(client);
  1216. }
  1217. static void synx_client_destroy(struct kref *kref)
  1218. {
  1219. struct synx_client *client =
  1220. container_of(kref, struct synx_client, refcount);
  1221. hash_del(&client->node);
  1222. INIT_WORK(&client->dispatch, synx_client_cleanup);
  1223. queue_work(synx_dev->wq_cleanup, &client->dispatch);
  1224. }
  1225. void synx_put_client(struct synx_client *client)
  1226. {
  1227. if (IS_ERR_OR_NULL(client))
  1228. return;
  1229. spin_lock_bh(&synx_dev->native->metadata_map_lock);
  1230. kref_put(&client->refcount, synx_client_destroy);
  1231. spin_unlock_bh(&synx_dev->native->metadata_map_lock);
  1232. }
  1233. void synx_util_generate_timestamp(char *timestamp, size_t size)
  1234. {
  1235. struct timespec64 tv;
  1236. struct tm tm;
  1237. ktime_get_real_ts64(&tv);
  1238. time64_to_tm(tv.tv_sec, 0, &tm);
  1239. snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
  1240. tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
  1241. tm.tm_min, tm.tm_sec);
  1242. }
  1243. void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
  1244. {
  1245. struct error_node *err_node;
  1246. if (!synx_dev->debugfs_root)
  1247. return;
  1248. err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
  1249. if (!err_node)
  1250. return;
  1251. err_node->client_id = client_id;
  1252. err_node->error_code = err;
  1253. err_node->h_synx = h_synx;
  1254. synx_util_generate_timestamp(err_node->timestamp,
  1255. sizeof(err_node->timestamp));
  1256. mutex_lock(&synx_dev->error_lock);
  1257. list_add(&err_node->node,
  1258. &synx_dev->error_list);
  1259. mutex_unlock(&synx_dev->error_lock);
  1260. }
  1261. int synx_util_save_data(void *fence, u32 flags,
  1262. u32 h_synx)
  1263. {
  1264. int rc = SYNX_SUCCESS;
1265. struct synx_entry_64 *entry = NULL, *curr;
  1266. u64 key;
  1267. u32 tbl = synx_util_map_params_to_type(flags);
  1268. switch (tbl) {
  1269. case SYNX_TYPE_CSL:
  1270. key = *(u32 *)fence;
  1271. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1272. /* ensure fence is not already added to map */
  1273. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1274. curr, node, key) {
  1275. if (curr->key == key) {
  1276. rc = -SYNX_ALREADY;
  1277. break;
  1278. }
  1279. }
  1280. if (rc == SYNX_SUCCESS) {
  1281. entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
  1282. if (entry) {
  1283. entry->data[0] = h_synx;
  1284. entry->key = key;
  1285. kref_init(&entry->refcount);
  1286. hash_add(synx_dev->native->csl_fence_map,
  1287. &entry->node, entry->key);
  1288. dprintk(SYNX_MEM, "added csl fence %d to map %pK\n",
  1289. entry->key, entry);
  1290. } else {
  1291. rc = -SYNX_NOMEM;
  1292. }
  1293. }
  1294. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1295. break;
  1296. default:
  1297. dprintk(SYNX_ERR, "invalid hash table selection\n");
  1298. kfree(entry);
  1299. rc = -SYNX_INVALID;
  1300. }
  1301. return rc;
  1302. }
  1303. struct synx_entry_64 *synx_util_retrieve_data(void *fence,
  1304. u32 type)
  1305. {
  1306. u64 key;
  1307. struct synx_entry_64 *entry = NULL;
  1308. struct synx_entry_64 *curr;
  1309. switch (type) {
  1310. case SYNX_TYPE_CSL:
  1311. key = *(u32 *)fence;
  1312. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1313. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1314. curr, node, key) {
  1315. if (curr->key == key) {
  1316. kref_get(&curr->refcount);
  1317. entry = curr;
  1318. break;
  1319. }
  1320. }
  1321. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1322. break;
  1323. default:
  1324. dprintk(SYNX_ERR, "invalid hash table selection %u\n",
  1325. type);
  1326. }
  1327. return entry;
  1328. }
  1329. static void synx_util_destroy_data(struct kref *kref)
  1330. {
  1331. struct synx_entry_64 *entry =
  1332. container_of(kref, struct synx_entry_64, refcount);
  1333. hash_del(&entry->node);
  1334. dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
  1335. entry->key, entry);
  1336. kfree(entry);
  1337. }
  1338. void synx_util_remove_data(void *fence,
  1339. u32 type)
  1340. {
  1341. u64 key;
  1342. struct synx_entry_64 *entry = NULL;
  1343. struct synx_entry_64 *curr;
  1344. if (IS_ERR_OR_NULL(fence))
  1345. return;
  1346. switch (type) {
  1347. case SYNX_TYPE_CSL:
  1348. key = *((u32 *)fence);
  1349. spin_lock_bh(&synx_dev->native->csl_map_lock);
  1350. hash_for_each_possible(synx_dev->native->csl_fence_map,
  1351. curr, node, key) {
  1352. if (curr->key == key) {
  1353. entry = curr;
  1354. break;
  1355. }
  1356. }
  1357. if (entry)
  1358. kref_put(&entry->refcount, synx_util_destroy_data);
  1359. spin_unlock_bh(&synx_dev->native->csl_map_lock);
  1360. break;
  1361. default:
  1362. dprintk(SYNX_ERR, "invalid hash table selection %u\n",
  1363. type);
  1364. }
  1365. }
  1366. void synx_util_map_import_params_to_create(
  1367. struct synx_import_indv_params *params,
  1368. struct synx_create_params *c_params)
  1369. {
  1370. if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
  1371. return;
  1372. if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
  1373. c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;
  1374. if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
  1375. c_params->flags |= SYNX_CREATE_LOCAL_FENCE;
  1376. if (params->flags & SYNX_IMPORT_DMA_FENCE)
  1377. c_params->flags |= SYNX_CREATE_DMA_FENCE;
  1378. }
  1379. u32 synx_util_map_client_id_to_core(
  1380. enum synx_client_id id)
  1381. {
  1382. u32 core_id;
  1383. switch (id) {
  1384. case SYNX_CLIENT_NATIVE:
  1385. core_id = SYNX_CORE_APSS; break;
  1386. case SYNX_CLIENT_ICP_CTX0:
  1387. core_id = SYNX_CORE_ICP; break;
  1388. case SYNX_CLIENT_EVA_CTX0:
  1389. core_id = SYNX_CORE_EVA; break;
  1390. case SYNX_CLIENT_VID_CTX0:
  1391. core_id = SYNX_CORE_IRIS; break;
  1392. case SYNX_CLIENT_NSP_CTX0:
  1393. core_id = SYNX_CORE_NSP; break;
  1394. default:
  1395. core_id = SYNX_CORE_MAX;
  1396. }
  1397. return core_id;
  1398. }