synx_util.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>

#include "synx_debugfs.h"
#include "synx_util.h"

extern void synx_external_callback(s32 sync_obj, int status, void *data);
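
/*
 * Initialize the synx coredata backing a new handle: allocate a global or
 * local handle as requested in params->flags, wrap the caller-supplied dma
 * fence (SYNX_CREATE_DMA_FENCE) or allocate and initialize a backing fence,
 * and record the fence-to-handle mapping in the fence map.
 */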
int synx_util_init_coredata(struct synx_coredata *synx_obj,
	struct synx_create_params *params,
	struct dma_fence_ops *ops,
	u64 dma_context)
{
	int rc = -SYNX_INVALID;
	spinlock_t *fence_lock;
	struct dma_fence *fence;
	struct synx_fence_entry *entry;

	if (IS_ERR_OR_NULL(synx_obj) || IS_ERR_OR_NULL(params) ||
		IS_ERR_OR_NULL(ops) || IS_ERR_OR_NULL(params->h_synx))
		return -SYNX_INVALID;

	if (params->flags & SYNX_CREATE_GLOBAL_FENCE &&
		*params->h_synx != 0) {
		rc = synx_global_get_ref(
			synx_util_global_idx(*params->h_synx));
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else if (params->flags & SYNX_CREATE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_synx);
		synx_obj->global_idx = synx_util_global_idx(*params->h_synx);
	} else {
		rc = synx_alloc_local_handle(params->h_synx);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	synx_obj->map_count = 1;
	synx_obj->num_bound_synxs = 0;
	synx_obj->type |= params->flags;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	if (params->name)
		strlcpy(synx_obj->name, params->name, sizeof(synx_obj->name));

	if (params->flags & SYNX_CREATE_DMA_FENCE) {
		fence = params->fence;
		if (IS_ERR_OR_NULL(fence)) {
			dprintk(SYNX_ERR, "invalid external fence\n");
			goto free;
		}

		dma_fence_get(fence);
		synx_obj->fence = fence;
	} else {
		/*
		 * lock and fence memory will be released in fence
		 * release function
		 */
		fence_lock = kzalloc(sizeof(*fence_lock), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence_lock)) {
			rc = -SYNX_NOMEM;
			goto free;
		}

		fence = kzalloc(sizeof(*fence), GFP_KERNEL);
		if (IS_ERR_OR_NULL(fence)) {
			kfree(fence_lock);
			rc = -SYNX_NOMEM;
			goto free;
		}

		spin_lock_init(fence_lock);
		dma_fence_init(fence, ops, fence_lock, dma_context, 1);
		synx_obj->fence = fence;
		synx_util_activate(synx_obj);
		dprintk(SYNX_MEM,
			"allocated backing fence %pK\n", fence);

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (IS_ERR_OR_NULL(entry)) {
			rc = -SYNX_NOMEM;
			goto clean;
		}

		entry->key = (u64)fence;
		if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
			entry->g_handle = *params->h_synx;
		else
			entry->l_handle = *params->h_synx;

		rc = synx_util_insert_fence_entry(entry,
			params->h_synx,
			params->flags & SYNX_CREATE_GLOBAL_FENCE);
		BUG_ON(rc != SYNX_SUCCESS);
	}

	if (rc != SYNX_SUCCESS)
		goto clean;

	return SYNX_SUCCESS;

clean:
	dma_fence_put(fence);
free:
	if (params->flags & SYNX_CREATE_GLOBAL_FENCE)
		synx_global_put_ref(
			synx_util_global_idx(*params->h_synx));
	else
		clear_bit(synx_util_global_idx(*params->h_synx),
			synx_dev->native->bitmap);

	return rc;
}
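
/*
 * Register a dma-fence callback on the backing fence so the synx framework
 * is notified when the fence is signaled natively. If the fence is already
 * signaled (-ENOENT), propagate the final status to the global coredata for
 * global objects.
 */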
int synx_util_add_callback(struct synx_coredata *synx_obj,
	u32 h_synx)
{
	int rc;
	struct synx_signal_cb *signal_cb;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	signal_cb = kzalloc(sizeof(*signal_cb), GFP_KERNEL);
	if (IS_ERR_OR_NULL(signal_cb))
		return -SYNX_NOMEM;

	signal_cb->handle = h_synx;
	signal_cb->flag = SYNX_SIGNAL_FROM_FENCE;
	signal_cb->synx_obj = synx_obj;

	/* get reference on synx coredata for signal cb */
	synx_util_get_object(synx_obj);

	/*
	 * adding callback enables synx framework to
	 * get notified on signal from clients using
	 * native dma fence operations.
	 */
	rc = dma_fence_add_callback(synx_obj->fence,
		&signal_cb->fence_cb, synx_fence_callback);
	if (rc != 0) {
		if (rc == -ENOENT) {
			if (synx_util_is_global_object(synx_obj)) {
				/* signal (if) global handle */
				rc = synx_global_update_status(
					synx_obj->global_idx,
					synx_util_get_object_status(synx_obj));
				if (rc != SYNX_SUCCESS)
					dprintk(SYNX_ERR,
						"status update of %u with fence %pK\n",
						synx_obj->global_idx, synx_obj->fence);
			} else {
				rc = SYNX_SUCCESS;
			}
		} else {
			dprintk(SYNX_ERR,
				"error adding callback for %pK err %d\n",
				synx_obj->fence, rc);
		}
		synx_util_put_object(synx_obj);
		kfree(signal_cb);
		return rc;
	}

	synx_obj->signal_cb = signal_cb;
	dprintk(SYNX_VERB, "added callback %pK to fence %pK\n",
		signal_cb, synx_obj->fence);

	return SYNX_SUCCESS;
}
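
/*
 * Initialize the coredata for a merged object: allocate the merged handle
 * and back it with a dma_fence_array built from the validated fence list.
 */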
int synx_util_init_group_coredata(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	struct synx_merge_params *params,
	u32 num_objs,
	u64 dma_context)
{
	int rc;
	struct dma_fence_array *array;

	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	if (params->flags & SYNX_MERGE_GLOBAL_FENCE) {
		rc = synx_alloc_global_handle(params->h_merged_obj);
		synx_obj->global_idx =
			synx_util_global_idx(*params->h_merged_obj);
	} else {
		rc = synx_alloc_local_handle(params->h_merged_obj);
	}

	if (rc != SYNX_SUCCESS)
		return rc;

	array = dma_fence_array_create(num_objs, fences,
		dma_context, 1, false);
	if (IS_ERR_OR_NULL(array))
		return -SYNX_INVALID;

	synx_obj->fence = &array->base;
	synx_obj->map_count = 1;
	synx_obj->type = params->flags;
	synx_obj->type |= SYNX_CREATE_MERGED_FENCE;
	synx_obj->num_bound_synxs = 0;
	kref_init(&synx_obj->refcount);
	mutex_init(&synx_obj->obj_lock);
	INIT_LIST_HEAD(&synx_obj->reg_cbs_list);
	synx_util_activate(synx_obj);

	return rc;
}
static void synx_util_destroy_coredata(struct kref *kref)
{
	struct synx_coredata *synx_obj =
		container_of(kref, struct synx_coredata, refcount);

	if (synx_util_is_global_object(synx_obj))
		synx_global_put_ref(synx_obj->global_idx);

	synx_util_object_destroy(synx_obj);
}

void synx_util_get_object(struct synx_coredata *synx_obj)
{
	kref_get(&synx_obj->refcount);
}

void synx_util_put_object(struct synx_coredata *synx_obj)
{
	kref_put(&synx_obj->refcount, synx_util_destroy_coredata);
}
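
/*
 * Final teardown of a synx coredata object: flush undispatched callbacks,
 * deregister bound external fences, drop the fence map entry and release
 * the backing fence reference.
 */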
void synx_util_object_destroy(struct synx_coredata *synx_obj)
{
	int rc;
	u32 i;
	s32 sync_id;
	u32 type;
	struct synx_cb_data *synx_cb, *synx_cb_temp;
	struct synx_bind_desc *bind_desc;
	struct bind_operations *bind_ops;
	struct synx_external_data *data;

	/* clear all the undispatched callbacks */
	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		dprintk(SYNX_ERR,
			"cleaning up callback of session %pK\n",
			synx_cb->session);
		list_del_init(&synx_cb->node);
		kfree(synx_cb);
	}

	for (i = 0; i < synx_obj->num_bound_synxs; i++) {
		bind_desc = &synx_obj->bound_synxs[i];
		sync_id = bind_desc->external_desc.id;
		type = bind_desc->external_desc.type;
		data = bind_desc->external_data;
		bind_ops = synx_util_get_bind_ops(type);
		if (IS_ERR_OR_NULL(bind_ops)) {
			dprintk(SYNX_ERR,
				"bind ops fail id: %d, type: %u\n",
				sync_id, type);
			continue;
		}

		/* clear the hash table entry */
		synx_util_remove_data(&sync_id, type);

		rc = bind_ops->deregister_callback(
			synx_external_callback, data, sync_id);
		if (rc < 0) {
			dprintk(SYNX_ERR,
				"de-registration fail id: %d, type: %u, err: %d\n",
				sync_id, type, rc);
			continue;
		}

		/*
		 * release the memory allocated for external data.
		 * It is safe to release this memory
		 * only if deregistration is successful.
		 */
		kfree(data);
	}

	mutex_destroy(&synx_obj->obj_lock);
	synx_util_release_fence_entry((u64)synx_obj->fence);
	dma_fence_put(synx_obj->fence);
	dprintk(SYNX_MEM, "released synx object %pK\n", synx_obj);
	kfree(synx_obj);
}
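
/* find and atomically claim the first free index in the handle bitmap */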
long synx_util_get_free_handle(unsigned long *bitmap, unsigned int size)
{
	bool bit;
	long idx;

	do {
		idx = find_first_zero_bit(bitmap, size);
		if (idx >= size)
			break;
		bit = test_and_set_bit(idx, bitmap);
	} while (bit);

	return idx;
}
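
/*
 * Handle layout (low to high bits): object index in the lower
 * SYNX_HANDLE_INDEX_BITS, core id in the next SYNX_HANDLE_CORE_BITS,
 * and a single global/local flag bit above them.
 */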
u32 synx_encode_handle(u32 idx, u32 core_id, bool global_idx)
{
	u32 handle = 0;

	if (idx >= SYNX_MAX_OBJS)
		return 0;

	if (global_idx) {
		handle = 1;
		handle <<= SYNX_HANDLE_CORE_BITS;
	}

	handle |= core_id;
	handle <<= SYNX_HANDLE_INDEX_BITS;
	handle |= idx;

	return handle;
}

int synx_alloc_global_handle(u32 *new_synx)
{
	int rc;
	u32 idx;

	rc = synx_global_alloc_index(&idx);
	if (rc != SYNX_SUCCESS)
		return rc;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, true);
	dprintk(SYNX_DBG, "allocated global handle %u (0x%x)\n",
		*new_synx, *new_synx);

	rc = synx_global_init_coredata(*new_synx);
	return rc;
}

int synx_alloc_local_handle(u32 *new_synx)
{
	u32 idx;

	idx = synx_util_get_free_handle(synx_dev->native->bitmap,
		SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS)
		return -SYNX_NOMEM;

	*new_synx = synx_encode_handle(idx, SYNX_CORE_APSS, false);
	dprintk(SYNX_DBG, "allocated local handle %u (0x%x)\n",
		*new_synx, *new_synx);

	return SYNX_SUCCESS;
}
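
/*
 * Create the per-client handle coredata and add it to the client handle
 * map; if the handle already exists for the same object, just take an
 * additional reference on the existing entry.
 */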
int synx_util_init_handle(struct synx_client *client,
	struct synx_coredata *synx_obj, u32 *new_h_synx,
	void *map_entry)
{
	int rc = SYNX_SUCCESS;
	bool found = false;
	struct synx_handle_coredata *synx_data, *curr;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(synx_obj) ||
		IS_ERR_OR_NULL(new_h_synx) || IS_ERR_OR_NULL(map_entry))
		return -SYNX_INVALID;

	synx_data = kzalloc(sizeof(*synx_data), GFP_ATOMIC);
	if (IS_ERR_OR_NULL(synx_data))
		return -SYNX_NOMEM;

	synx_data->client = client;
	synx_data->synx_obj = synx_obj;
	synx_data->key = *new_h_synx;
	synx_data->map_entry = map_entry;
	kref_init(&synx_data->refcount);
	synx_data->rel_count = 1;

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		curr, node, *new_h_synx) {
		if (curr->key == *new_h_synx) {
			if (curr->synx_obj != synx_obj) {
				rc = -SYNX_INVALID;
				dprintk(SYNX_ERR,
					"inconsistent data in handle map\n");
			} else {
				kref_get(&curr->refcount);
				curr->rel_count++;
			}
			found = true;
			break;
		}
	}
	if (unlikely(found))
		kfree(synx_data);
	else
		hash_add(client->handle_map,
			&synx_data->node, *new_h_synx);
	spin_unlock_bh(&client->handle_map_lock);

	return rc;
}
int synx_util_activate(struct synx_coredata *synx_obj)
{
	if (IS_ERR_OR_NULL(synx_obj))
		return -SYNX_INVALID;

	/* move synx to ACTIVE state and register cb for merged object */
	dma_fence_enable_sw_signaling(synx_obj->fence);
	return 0;
}

static u32 synx_util_get_references(struct synx_coredata *synx_obj)
{
	u32 count = 0;
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	/* obtain dma fence reference */
	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_get(array->fences[i]);
		count = array->num_fences;
	} else {
		dma_fence_get(synx_obj->fence);
		count = 1;
	}

	return count;
}

static void synx_util_put_references(struct synx_coredata *synx_obj)
{
	u32 i = 0;
	struct dma_fence_array *array = NULL;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return;

		for (i = 0; i < array->num_fences; i++)
			dma_fence_put(array->fences[i]);
	} else {
		dma_fence_put(synx_obj->fence);
	}
}

static u32 synx_util_add_fence(struct synx_coredata *synx_obj,
	struct dma_fence **fences,
	u32 idx)
{
	struct dma_fence_array *array = NULL;
	u32 i = 0;

	if (dma_fence_is_array(synx_obj->fence)) {
		array = to_dma_fence_array(synx_obj->fence);
		if (IS_ERR_OR_NULL(array))
			return 0;

		for (i = 0; i < array->num_fences; i++)
			fences[idx+i] = array->fences[i];

		return array->num_fences;
	}

	fences[idx] = synx_obj->fence;
	return 1;
}

static u32 synx_util_remove_duplicates(struct dma_fence **arr, u32 num)
{
	int i, j;
	u32 wr_idx = 1;

	if (IS_ERR_OR_NULL(arr)) {
		dprintk(SYNX_ERR, "invalid input array\n");
		return 0;
	}

	for (i = 1; i < num; i++) {
		for (j = 0; j < wr_idx; j++) {
			if (arr[i] == arr[j]) {
				/* release reference obtained for duplicate */
				dprintk(SYNX_DBG,
					"releasing duplicate reference\n");
				dma_fence_put(arr[i]);
				break;
			}
		}
		if (j == wr_idx)
			arr[wr_idx++] = arr[i];
	}

	return wr_idx;
}
s32 synx_util_merge_error(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs)
{
	u32 i = 0;
	struct synx_handle_coredata *synx_data;
	struct synx_coredata *synx_obj;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(h_synxs))
		return -SYNX_INVALID;

	for (i = 0; i < num_objs; i++) {
		synx_data = synx_util_acquire_handle(client, h_synxs[i]);
		synx_obj = synx_util_obtain_object(synx_data);
		if (IS_ERR_OR_NULL(synx_obj) ||
			IS_ERR_OR_NULL(synx_obj->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in cleanup\n",
				client->id, h_synxs[i]);
			continue;
		}

		/* release all references obtained during merge validation */
		synx_util_put_references(synx_obj);
		synx_util_release_handle(synx_data);
	}

	return 0;
}
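
/*
 * Validate the list of handles to be merged and build a flat, de-duplicated
 * array of their backing fences (with references held) for the merge.
 */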
int synx_util_validate_merge(struct synx_client *client,
	u32 *h_synxs,
	u32 num_objs,
	struct dma_fence ***fence_list,
	u32 *fence_cnt)
{
	u32 count = 0;
	u32 i = 0;
	struct synx_handle_coredata **synx_datas;
	struct synx_coredata **synx_objs;
	struct dma_fence **fences = NULL;

	if (num_objs <= 1) {
		dprintk(SYNX_ERR, "single handle merge is not allowed\n");
		return -SYNX_INVALID;
	}

	synx_datas = kcalloc(num_objs, sizeof(*synx_datas), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_datas))
		return -SYNX_NOMEM;

	synx_objs = kcalloc(num_objs, sizeof(*synx_objs), GFP_KERNEL);
	if (IS_ERR_OR_NULL(synx_objs)) {
		kfree(synx_datas);
		return -SYNX_NOMEM;
	}

	for (i = 0; i < num_objs; i++) {
		synx_datas[i] = synx_util_acquire_handle(client, h_synxs[i]);
		synx_objs[i] = synx_util_obtain_object(synx_datas[i]);
		if (IS_ERR_OR_NULL(synx_objs[i]) ||
			IS_ERR_OR_NULL(synx_objs[i]->fence)) {
			dprintk(SYNX_ERR,
				"[sess :%llu] invalid handle %d in merge list\n",
				client->id, h_synxs[i]);
			*fence_cnt = i;
			goto error;
		}
		count += synx_util_get_references(synx_objs[i]);
	}

	fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
	if (IS_ERR_OR_NULL(fences)) {
		*fence_cnt = num_objs;
		goto error;
	}

	/* memory will be released later in the invoking function */
	*fence_list = fences;
	count = 0;

	for (i = 0; i < num_objs; i++) {
		count += synx_util_add_fence(synx_objs[i], fences, count);
		/* release the reference obtained earlier in the function */
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = synx_util_remove_duplicates(fences, count);
	kfree(synx_objs);
	kfree(synx_datas);
	return 0;

error:
	/* release the reference/s obtained earlier in the function */
	for (i = 0; i < *fence_cnt; i++) {
		synx_util_put_references(synx_objs[i]);
		synx_util_release_handle(synx_datas[i]);
	}

	*fence_cnt = 0;
	kfree(synx_objs);
	kfree(synx_datas);
	return -SYNX_INVALID;
}
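
/* map a single dma-fence status onto the corresponding synx state */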
static u32 __fence_state(struct dma_fence *fence, bool locked)
{
	s32 status;
	u32 state = SYNX_STATE_INVALID;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	if (locked)
		status = dma_fence_get_status_locked(fence);
	else
		status = dma_fence_get_status(fence);

	/* convert fence status to synx state */
	switch (status) {
	case 0:
		state = SYNX_STATE_ACTIVE;
		break;
	case 1:
		state = SYNX_STATE_SIGNALED_SUCCESS;
		break;
	case -SYNX_STATE_SIGNALED_CANCEL:
		state = SYNX_STATE_SIGNALED_CANCEL;
		break;
	case -SYNX_STATE_SIGNALED_EXTERNAL:
		state = SYNX_STATE_SIGNALED_EXTERNAL;
		break;
	case -SYNX_STATE_SIGNALED_ERROR:
		state = SYNX_STATE_SIGNALED_ERROR;
		break;
	default:
		state = (u32)(-status);
	}

	return state;
}
static u32 __fence_group_state(struct dma_fence *fence, bool locked)
{
	u32 i = 0;
	u32 state = SYNX_STATE_INVALID;
	struct dma_fence_array *array = NULL;
	u32 intr, actv_cnt, sig_cnt, err_cnt;

	if (IS_ERR_OR_NULL(fence)) {
		dprintk(SYNX_ERR, "invalid fence\n");
		return SYNX_STATE_INVALID;
	}

	actv_cnt = sig_cnt = err_cnt = 0;
	array = to_dma_fence_array(fence);
	if (IS_ERR_OR_NULL(array))
		return SYNX_STATE_INVALID;

	for (i = 0; i < array->num_fences; i++) {
		intr = __fence_state(array->fences[i], locked);
		switch (intr) {
		case SYNX_STATE_ACTIVE:
			actv_cnt++;
			break;
		case SYNX_STATE_SIGNALED_SUCCESS:
			sig_cnt++;
			break;
		default:
			err_cnt++;
		}
	}

	dprintk(SYNX_DBG,
		"group cnt stats act:%u, sig: %u, err: %u\n",
		actv_cnt, sig_cnt, err_cnt);

	if (err_cnt)
		state = SYNX_STATE_SIGNALED_ERROR;
	else if (actv_cnt)
		state = SYNX_STATE_ACTIVE;
	else if (sig_cnt == array->num_fences)
		state = SYNX_STATE_SIGNALED_SUCCESS;

	return state;
}
/*
 * WARN: Should not hold the fence spinlock when invoking
 * this function. Use synx_util_get_object_status_locked instead.
 */
u32 synx_util_get_object_status(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, false);
	else
		state = __fence_state(synx_obj->fence, false);

	return state;
}

/* use this for status check when holding on to metadata spinlock */
u32 synx_util_get_object_status_locked(struct synx_coredata *synx_obj)
{
	u32 state;

	if (IS_ERR_OR_NULL(synx_obj))
		return SYNX_STATE_INVALID;

	if (synx_util_is_merged_object(synx_obj))
		state = __fence_group_state(synx_obj->fence, true);
	else
		state = __fence_state(synx_obj->fence, true);

	return state;
}
struct synx_handle_coredata *synx_util_acquire_handle(
	struct synx_client *client, u32 h_synx)
{
	struct synx_handle_coredata *synx_data = NULL;
	struct synx_handle_coredata *synx_handle =
		ERR_PTR(-SYNX_NOENT);

	if (IS_ERR_OR_NULL(client))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&client->handle_map_lock);
	hash_for_each_possible(client->handle_map,
		synx_data, node, h_synx) {
		if (synx_data->key == h_synx &&
			synx_data->rel_count != 0) {
			kref_get(&synx_data->refcount);
			synx_handle = synx_data;
			break;
		}
	}
	spin_unlock_bh(&client->handle_map_lock);

	return synx_handle;
}
struct synx_map_entry *synx_util_insert_to_map(
	struct synx_coredata *synx_obj,
	u32 h_synx, u32 flags)
{
	struct synx_map_entry *map_entry;

	map_entry = kzalloc(sizeof(*map_entry), GFP_KERNEL);
	if (IS_ERR_OR_NULL(map_entry))
		return ERR_PTR(-SYNX_NOMEM);

	kref_init(&map_entry->refcount);
	map_entry->synx_obj = synx_obj;
	map_entry->flags = flags;
	map_entry->key = h_synx;

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_add(synx_dev->native->global_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->global_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to global map %pK\n",
			h_synx, map_entry);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_add(synx_dev->native->local_map,
			&map_entry->node, h_synx);
		spin_unlock_bh(&synx_dev->native->local_map_lock);
		dprintk(SYNX_MEM,
			"added handle %u to local map %pK\n",
			h_synx, map_entry);
	}

	return map_entry;
}

struct synx_map_entry *synx_util_get_map_entry(u32 h_synx)
{
	struct synx_map_entry *curr;
	struct synx_map_entry *map_entry = ERR_PTR(-SYNX_NOENT);

	if (h_synx == 0)
		return ERR_PTR(-SYNX_INVALID);

	if (synx_util_is_global_handle(h_synx)) {
		spin_lock_bh(&synx_dev->native->global_map_lock);
		hash_for_each_possible(synx_dev->native->global_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->global_map_lock);
	} else {
		spin_lock_bh(&synx_dev->native->local_map_lock);
		hash_for_each_possible(synx_dev->native->local_map,
			curr, node, h_synx) {
			if (curr->key == h_synx) {
				kref_get(&curr->refcount);
				map_entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->local_map_lock);
	}

	/* should we allocate if entry not found? */
	return map_entry;
}
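
/*
 * Drop the map reference on the backing fence. When the last local mapping
 * goes away while the object is still active, either signal the fence with
 * the already-updated global status or re-point the signal callback at the
 * global index so cross-core waiters still get notified.
 */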
static void synx_util_cleanup_fence(
	struct synx_coredata *synx_obj)
{
	struct synx_signal_cb *signal_cb;
	unsigned long flags;
	u32 g_status;
	u32 f_status;

	mutex_lock(&synx_obj->obj_lock);
	synx_obj->map_count--;
	signal_cb = synx_obj->signal_cb;
	f_status = synx_util_get_object_status(synx_obj);
	dprintk(SYNX_VERB, "f_status:%u, signal_cb:%p, map:%u, idx:%u\n",
		f_status, signal_cb, synx_obj->map_count, synx_obj->global_idx);
	if (synx_obj->map_count == 0 &&
		(signal_cb != NULL) &&
		(synx_obj->global_idx != 0) &&
		(f_status == SYNX_STATE_ACTIVE)) {
		/*
		 * no more clients interested in notification
		 * on handle on local core.
		 * remove reference held by callback on synx
		 * coredata structure and update cb (if still
		 * un-signaled) with global handle idx to
		 * notify any cross-core clients waiting on
		 * handle.
		 */
		g_status = synx_global_get_status(synx_obj->global_idx);
		if (g_status > SYNX_STATE_ACTIVE) {
			dprintk(SYNX_DBG, "signaling fence %pK with status %u\n",
				synx_obj->fence, g_status);
			synx_native_signal_fence(synx_obj, g_status);
		} else {
			spin_lock_irqsave(synx_obj->fence->lock, flags);
			if (synx_util_get_object_status_locked(synx_obj) ==
				SYNX_STATE_ACTIVE) {
				signal_cb->synx_obj = NULL;
				signal_cb->handle = synx_obj->global_idx;
				synx_obj->signal_cb = NULL;
				/*
				 * release reference held by signal cb and
				 * get reference on global index instead.
				 */
				synx_util_put_object(synx_obj);
				synx_global_get_ref(synx_obj->global_idx);
			}
			spin_unlock_irqrestore(synx_obj->fence->lock, flags);
		}
	} else if (synx_obj->map_count == 0 && signal_cb &&
		(f_status == SYNX_STATE_ACTIVE)) {
		if (dma_fence_remove_callback(synx_obj->fence,
			&signal_cb->fence_cb)) {
			dprintk(SYNX_MEM, "signal cb destroyed %pK\n",
				signal_cb);
			kfree(signal_cb);
			synx_obj->signal_cb = NULL;
			/* release reference held by the signal cb */
			synx_util_put_object(synx_obj);
		}
	}
	mutex_unlock(&synx_obj->obj_lock);
}
static void synx_util_destroy_map_entry_worker(
	struct work_struct *dispatch)
{
	struct synx_map_entry *map_entry =
		container_of(dispatch, struct synx_map_entry, dispatch);
	struct synx_coredata *synx_obj;

	synx_obj = map_entry->synx_obj;
	if (!IS_ERR_OR_NULL(synx_obj)) {
		synx_util_cleanup_fence(synx_obj);
		/* release reference held by map entry */
		synx_util_put_object(synx_obj);
	}

	if (!synx_util_is_global_handle(map_entry->key))
		clear_bit(synx_util_global_idx(map_entry->key),
			synx_dev->native->bitmap);
	dprintk(SYNX_VERB, "map entry for %u destroyed %pK\n",
		map_entry->key, map_entry);
	kfree(map_entry);
}

static void synx_util_destroy_map_entry(struct kref *kref)
{
	struct synx_map_entry *map_entry =
		container_of(kref, struct synx_map_entry, refcount);

	hash_del(&map_entry->node);
	dprintk(SYNX_MEM, "map entry for %u removed %pK\n",
		map_entry->key, map_entry);
	INIT_WORK(&map_entry->dispatch, synx_util_destroy_map_entry_worker);
	queue_work(synx_dev->wq_cleanup, &map_entry->dispatch);
}

void synx_util_release_map_entry(struct synx_map_entry *map_entry)
{
	spinlock_t *lock;

	if (IS_ERR_OR_NULL(map_entry))
		return;

	if (synx_util_is_global_handle(map_entry->key))
		lock = &synx_dev->native->global_map_lock;
	else
		lock = &synx_dev->native->local_map_lock;

	spin_lock_bh(lock);
	kref_put(&map_entry->refcount,
		synx_util_destroy_map_entry);
	spin_unlock_bh(lock);
}
static void synx_util_destroy_handle_worker(
	struct work_struct *dispatch)
{
	struct synx_handle_coredata *synx_data =
		container_of(dispatch, struct synx_handle_coredata,
		dispatch);

	synx_util_release_map_entry(synx_data->map_entry);
	dprintk(SYNX_VERB, "handle %u destroyed %pK\n",
		synx_data->key, synx_data);
	kfree(synx_data);
}

static void synx_util_destroy_handle(struct kref *kref)
{
	struct synx_handle_coredata *synx_data =
		container_of(kref, struct synx_handle_coredata,
		refcount);

	hash_del(&synx_data->node);
	dprintk(SYNX_MEM, "[sess :%llu] handle %u removed %pK\n",
		synx_data->client->id, synx_data->key, synx_data);
	INIT_WORK(&synx_data->dispatch, synx_util_destroy_handle_worker);
	queue_work(synx_dev->wq_cleanup, &synx_data->dispatch);
}

void synx_util_release_handle(struct synx_handle_coredata *synx_data)
{
	struct synx_client *client;

	if (IS_ERR_OR_NULL(synx_data))
		return;

	client = synx_data->client;
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&client->handle_map_lock);
	kref_put(&synx_data->refcount,
		synx_util_destroy_handle);
	spin_unlock_bh(&client->handle_map_lock);
}

struct bind_operations *synx_util_get_bind_ops(u32 type)
{
	struct synx_registered_ops *client_ops;

	if (!synx_util_is_valid_bind_type(type))
		return NULL;

	mutex_lock(&synx_dev->vtbl_lock);
	client_ops = &synx_dev->bind_vtbl[type];
	if (!client_ops->valid) {
		mutex_unlock(&synx_dev->vtbl_lock);
		return NULL;
	}
	mutex_unlock(&synx_dev->vtbl_lock);

	return &client_ops->ops;
}
int synx_util_alloc_cb_entry(struct synx_client *client,
	struct synx_kernel_payload *data,
	u32 *cb_idx)
{
	long idx;
	struct synx_client_cb *cb;

	if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(data) ||
		IS_ERR_OR_NULL(cb_idx))
		return -SYNX_INVALID;

	idx = synx_util_get_free_handle(client->cb_bitmap, SYNX_MAX_OBJS);
	if (idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] free cb index not available\n",
			client->id);
		return -SYNX_NOMEM;
	}

	cb = &client->cb_table[idx];
	memset(cb, 0, sizeof(*cb));
	cb->is_valid = true;
	cb->client = client;
	cb->idx = idx;
	memcpy(&cb->kernel_cb, data,
		sizeof(cb->kernel_cb));

	*cb_idx = idx;
	dprintk(SYNX_VERB, "[sess :%llu] allocated cb index %u\n",
		client->id, *cb_idx);
	return 0;
}

int synx_util_clear_cb_entry(struct synx_client *client,
	struct synx_client_cb *cb)
{
	int rc = 0;
	u32 idx;

	if (IS_ERR_OR_NULL(cb))
		return -SYNX_INVALID;

	idx = cb->idx;
	memset(cb, 0, sizeof(*cb));
	if (idx && idx < SYNX_MAX_OBJS) {
		clear_bit(idx, client->cb_bitmap);
	} else {
		dprintk(SYNX_ERR, "invalid index\n");
		rc = -SYNX_INVALID;
	}

	return rc;
}
void synx_util_default_user_callback(u32 h_synx,
	int status, void *data)
{
	struct synx_client_cb *cb = data;
	struct synx_client *client = NULL;

	if (cb && cb->client) {
		client = cb->client;
		dprintk(SYNX_VERB,
			"[sess :%llu] user cb queued for handle %d\n",
			client->id, h_synx);
		cb->kernel_cb.status = status;
		mutex_lock(&client->event_q_lock);
		list_add_tail(&cb->node, &client->event_q);
		mutex_unlock(&client->event_q_lock);
		wake_up_all(&client->event_wq);
	} else {
		dprintk(SYNX_ERR, "invalid params\n");
	}
}

void synx_util_callback_dispatch(struct synx_coredata *synx_obj, u32 status)
{
	struct synx_cb_data *synx_cb, *synx_cb_temp;

	if (IS_ERR_OR_NULL(synx_obj)) {
		dprintk(SYNX_ERR, "invalid arguments\n");
		return;
	}

	list_for_each_entry_safe(synx_cb,
		synx_cb_temp, &synx_obj->reg_cbs_list, node) {
		synx_cb->status = status;
		list_del_init(&synx_cb->node);
		queue_work(synx_dev->wq_cb,
			&synx_cb->cb_dispatch);
		dprintk(SYNX_VERB, "dispatched callback\n");
	}
}
void synx_util_cb_dispatch(struct work_struct *cb_dispatch)
{
	struct synx_cb_data *synx_cb =
		container_of(cb_dispatch, struct synx_cb_data, cb_dispatch);
	struct synx_client *client;
	struct synx_client_cb *cb;
	struct synx_kernel_payload payload;
	u32 status;

	client = synx_get_client(synx_cb->session);
	if (IS_ERR_OR_NULL(client)) {
		dprintk(SYNX_ERR,
			"invalid session data %pK in cb payload\n",
			synx_cb->session);
		goto free;
	}

	if (synx_cb->idx == 0 ||
		synx_cb->idx >= SYNX_MAX_OBJS) {
		dprintk(SYNX_ERR,
			"[sess :%llu] invalid cb index %u\n",
			client->id, synx_cb->idx);
		goto fail;
	}

	status = synx_cb->status;
	cb = &client->cb_table[synx_cb->idx];
	if (!cb->is_valid) {
		dprintk(SYNX_ERR, "invalid cb payload\n");
		goto fail;
	}

	memcpy(&payload, &cb->kernel_cb, sizeof(cb->kernel_cb));
	payload.status = status;

	if (payload.cb_func == synx_util_default_user_callback) {
		/*
		 * need to send client cb data for default
		 * user cb (userspace cb)
		 */
		payload.data = cb;
	} else {
		/*
		 * clear the cb entry. userspace cb entry
		 * will be cleared after data read by the
		 * polling thread or when client is destroyed
		 */
		if (synx_util_clear_cb_entry(client, cb))
			dprintk(SYNX_ERR,
				"[sess :%llu] error clearing cb entry\n",
				client->id);
	}

	dprintk(SYNX_INFO,
		"callback dispatched for handle %u, status %u, data %pK\n",
		payload.h_synx, payload.status, payload.data);

	/* dispatch kernel callback */
	payload.cb_func(payload.h_synx,
		payload.status, payload.data);

fail:
	synx_put_client(client);
free:
	kfree(synx_cb);
}
u32 synx_util_get_fence_entry(u64 key, u32 global)
{
	u32 h_synx = 0;
	struct synx_fence_entry *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			if (global)
				h_synx = curr->g_handle;
			/* return local handle if global not available */
			if (h_synx == 0)
				h_synx = curr->l_handle;
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return h_synx;
}

void synx_util_release_fence_entry(u64 key)
{
	struct synx_fence_entry *entry = NULL, *curr;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, key) {
		if (curr->key == key) {
			entry = curr;
			break;
		}
	}
	if (entry) {
		hash_del(&entry->node);
		dprintk(SYNX_MEM,
			"released fence entry %pK for fence %pK\n",
			entry, (void *)key);
		kfree(entry);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);
}
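
/*
 * Add a fence-to-handle entry to the fence map. If another context raced
 * and already inserted the same fence, return -SYNX_ALREADY and hand back
 * the existing handle through h_synx.
 */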
int synx_util_insert_fence_entry(struct synx_fence_entry *entry,
	u32 *h_synx, u32 global)
{
	int rc = SYNX_SUCCESS;
	struct synx_fence_entry *curr;

	if (IS_ERR_OR_NULL(entry) || IS_ERR_OR_NULL(h_synx))
		return -SYNX_INVALID;

	spin_lock_bh(&synx_dev->native->fence_map_lock);
	hash_for_each_possible(synx_dev->native->fence_map,
		curr, node, entry->key) {
		/* raced with import from another process on same fence */
		if (curr->key == entry->key) {
			if (global)
				*h_synx = curr->g_handle;

			if (*h_synx == 0 || !global)
				*h_synx = curr->l_handle;

			rc = -SYNX_ALREADY;
			break;
		}
	}
	/* add entry only if it's not present in the map */
	if (rc == SYNX_SUCCESS) {
		hash_add(synx_dev->native->fence_map,
			&entry->node, entry->key);
		dprintk(SYNX_MEM,
			"added fence entry %pK for fence %pK\n",
			entry, (void *)entry->key);
	}
	spin_unlock_bh(&synx_dev->native->fence_map_lock);

	return rc;
}
struct synx_client *synx_get_client(struct synx_session *session)
{
	struct synx_client *client = NULL;
	struct synx_client *curr;

	if (IS_ERR_OR_NULL(session))
		return ERR_PTR(-SYNX_INVALID);

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	hash_for_each_possible(synx_dev->native->client_metadata_map,
		curr, node, (u64)session) {
		if (curr == (struct synx_client *)session) {
			if (curr->active) {
				kref_get(&curr->refcount);
				client = curr;
			}
			break;
		}
	}
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);

	return client;
}
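
/*
 * Deferred session teardown: release any handles the client failed to
 * release before closing the session, then free the client itself.
 */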
static void synx_client_cleanup(struct work_struct *dispatch)
{
	int i, j;
	struct synx_client *client =
		container_of(dispatch, struct synx_client, dispatch);
	struct synx_handle_coredata *curr;
	struct hlist_node *tmp;

	/*
	 * go over all the remaining synx obj handles
	 * un-released from this session and remove them.
	 */
	hash_for_each_safe(client->handle_map, i, tmp, curr, node) {
		dprintk(SYNX_WARN,
			"[sess :%llu] un-released handle %u\n",
			client->id, curr->key);
		j = kref_read(&curr->refcount);
		/* release pending reference */
		while (j--)
			kref_put(&curr->refcount, synx_util_destroy_handle);
	}

	mutex_destroy(&client->event_q_lock);

	dprintk(SYNX_VERB, "session %llu [%s] destroyed %pK\n",
		client->id, client->name, client);
	vfree(client);
}

static void synx_client_destroy(struct kref *kref)
{
	struct synx_client *client =
		container_of(kref, struct synx_client, refcount);

	hash_del(&client->node);
	dprintk(SYNX_INFO, "[sess :%llu] session removed %s\n",
		client->id, client->name);

	INIT_WORK(&client->dispatch, synx_client_cleanup);
	queue_work(synx_dev->wq_cleanup, &client->dispatch);
}

void synx_put_client(struct synx_client *client)
{
	if (IS_ERR_OR_NULL(client))
		return;

	spin_lock_bh(&synx_dev->native->metadata_map_lock);
	kref_put(&client->refcount, synx_client_destroy);
	spin_unlock_bh(&synx_dev->native->metadata_map_lock);
}
void synx_util_generate_timestamp(char *timestamp, size_t size)
{
	struct timespec64 tv;
	struct tm tm;

	ktime_get_real_ts64(&tv);
	time64_to_tm(tv.tv_sec, 0, &tm);
	snprintf(timestamp, size, "%02d-%02d %02d:%02d:%02d",
		tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
		tm.tm_min, tm.tm_sec);
}

void synx_util_log_error(u32 client_id, u32 h_synx, s32 err)
{
	struct error_node *err_node;

	if (!synx_dev->debugfs_root)
		return;

	err_node = kzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return;

	err_node->client_id = client_id;
	err_node->error_code = err;
	err_node->h_synx = h_synx;
	synx_util_generate_timestamp(err_node->timestamp,
		sizeof(err_node->timestamp));
	mutex_lock(&synx_dev->error_lock);
	list_add(&err_node->node,
		&synx_dev->error_list);
	mutex_unlock(&synx_dev->error_lock);
}
int synx_util_save_data(void *fence, u32 flags,
	u32 h_synx)
{
	int rc = SYNX_SUCCESS;
	struct synx_entry_64 *entry, *curr;
	u64 key;
	u32 tbl = synx_util_map_params_to_type(flags);

	switch (tbl) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		/* ensure fence is not already added to map */
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				rc = -SYNX_ALREADY;
				break;
			}
		}
		if (rc == SYNX_SUCCESS) {
			entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
			if (entry) {
				entry->data[0] = h_synx;
				entry->key = key;
				kref_init(&entry->refcount);
				hash_add(synx_dev->native->csl_fence_map,
					&entry->node, entry->key);
				dprintk(SYNX_MEM, "added csl fence %llu to map %pK\n",
					entry->key, entry);
			} else {
				rc = -SYNX_NOMEM;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection\n");
		rc = -SYNX_INVALID;
	}

	return rc;
}
struct synx_entry_64 *synx_util_retrieve_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *(u32 *)fence;
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				kref_get(&curr->refcount);
				entry = curr;
				break;
			}
		}
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}

	return entry;
}

static void synx_util_destroy_data(struct kref *kref)
{
	struct synx_entry_64 *entry =
		container_of(kref, struct synx_entry_64, refcount);

	hash_del(&entry->node);
	dprintk(SYNX_MEM, "released fence %llu entry %pK\n",
		entry->key, entry);
	kfree(entry);
}

void synx_util_remove_data(void *fence,
	u32 type)
{
	u64 key;
	struct synx_entry_64 *entry = NULL;
	struct synx_entry_64 *curr;

	if (IS_ERR_OR_NULL(fence))
		return;

	switch (type) {
	case SYNX_TYPE_CSL:
		key = *((u32 *)fence);
		spin_lock_bh(&synx_dev->native->csl_map_lock);
		hash_for_each_possible(synx_dev->native->csl_fence_map,
			curr, node, key) {
			if (curr->key == key) {
				entry = curr;
				break;
			}
		}
		if (entry)
			kref_put(&entry->refcount, synx_util_destroy_data);
		spin_unlock_bh(&synx_dev->native->csl_map_lock);
		break;
	default:
		dprintk(SYNX_ERR, "invalid hash table selection %u\n",
			type);
	}
}
void synx_util_map_import_params_to_create(
	struct synx_import_indv_params *params,
	struct synx_create_params *c_params)
{
	if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(c_params))
		return;

	if (params->flags & SYNX_IMPORT_GLOBAL_FENCE)
		c_params->flags |= SYNX_CREATE_GLOBAL_FENCE;

	if (params->flags & SYNX_IMPORT_LOCAL_FENCE)
		c_params->flags |= SYNX_CREATE_LOCAL_FENCE;

	if (params->flags & SYNX_IMPORT_DMA_FENCE)
		c_params->flags |= SYNX_CREATE_DMA_FENCE;
}

u32 synx_util_map_client_id_to_core(
	enum synx_client_id id)
{
	u32 core_id;

	switch (id) {
	case SYNX_CLIENT_NATIVE:
		core_id = SYNX_CORE_APSS;
		break;
	case SYNX_CLIENT_EVA_CTX0:
		core_id = SYNX_CORE_EVA;
		break;
	case SYNX_CLIENT_VID_CTX0:
		core_id = SYNX_CORE_IRIS;
		break;
	case SYNX_CLIENT_NSP_CTX0:
		core_id = SYNX_CORE_NSP;
		break;
	default:
		core_id = SYNX_CORE_MAX;
	}

	return core_id;
}