synx_global.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/hwspinlock.h>
#include <linux/string.h>

#include "synx_debugfs.h"
#include "synx_global.h"

static struct synx_shared_mem synx_gmem;
static struct hwspinlock *synx_hwlock;
static u32 synx_gmem_lock_owner(u32 idx)
{
	/*
	 * The subscribers field of global table index 0 is used to
	 * maintain the synx gmem lock owner data. Each core updates
	 * the field after acquiring the lock and clears it before
	 * releasing the lock.
	 */
	return synx_gmem.table[0].subscribers;
}

static void synx_gmem_lock_owner_set(u32 idx)
{
	synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
}

static void synx_gmem_lock_owner_clear(u32 idx)
{
	if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
		dprintk(SYNX_WARN, "reset lock owned by core %u\n",
			synx_gmem.table[0].subscribers);

	synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
}

static int synx_gmem_lock(u32 idx, unsigned long *flags)
{
	int rc;

	if (!synx_hwlock)
		return -SYNX_INVALID;

	rc = hwspin_lock_timeout_irqsave(
		synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
	if (!rc)
		synx_gmem_lock_owner_set(idx);

	return rc;
}

static void synx_gmem_unlock(u32 idx, unsigned long *flags)
{
	synx_gmem_lock_owner_clear(idx);
	hwspin_unlock_irqrestore(synx_hwlock, flags);
}
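
/*
 * Illustrative access pattern (a sketch, not part of the original file):
 * every read or write of synx_gmem.table[] in this file follows the same
 * lock/access/unlock sequence shown here. synx_example_read_status() is
 * a hypothetical helper included only to document that convention.
 */
static u32 synx_example_read_status(u32 idx)
{
	unsigned long flags;
	u32 status = 0;

	/* synx_gmem_lock() returns 0 on success */
	if (!synx_gmem_lock(idx, &flags)) {
		status = synx_gmem.table[idx].status;
		synx_gmem_unlock(idx, &flags);
	}
	return status;
}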
static void synx_global_print_data(
	struct synx_global_coredata *synx_g_obj,
	const char *func)
{
	int i = 0;

	dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
		func, synx_g_obj->status,
		synx_g_obj->handle, synx_g_obj->refcount);

	dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
		func, synx_g_obj->subscribers, synx_g_obj->waiters,
		synx_g_obj->num_child);

	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
		if (synx_g_obj->parents[i])
			dprintk(SYNX_VERB, "%s: parents %u:%u",
				func, i, synx_g_obj->parents[i]);
}
bool synx_fetch_global_shared_memory_handle_details(u32 synx_handle,
	struct synx_global_coredata *synx_global_entry)
{
	int rc = SYNX_SUCCESS;
	u32 idx;
	unsigned long flags;
	struct synx_global_coredata *entry;

	if (!synx_gmem.table) {
		dprintk(SYNX_VERB, "synx_gmem is NULL\n");
		return false;
	}

	idx = synx_handle & SYNX_HANDLE_INDEX_MASK;
	if (!synx_is_valid_idx(idx))
		return false;

	rc = synx_gmem_lock(idx, &flags);
	if (rc) {
		dprintk(SYNX_VERB, "Failed to lock entry %d\n", idx);
		return false;
	}
	entry = &synx_gmem.table[idx];
	memcpy(synx_global_entry, entry, sizeof(struct synx_global_coredata));
	synx_gmem_unlock(idx, &flags);

	return true;
}
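
/*
 * Illustrative caller (hypothetical, for documentation only): snapshot
 * an entry with the helper above and inspect the local copy without
 * holding the hwspinlock across the inspection.
 */
static bool synx_example_is_signaled(u32 h_synx)
{
	struct synx_global_coredata entry;

	if (!synx_fetch_global_shared_memory_handle_details(h_synx, &entry))
		return false;

	return entry.status != SYNX_STATE_ACTIVE;
}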
int synx_global_dump_shared_memory(void)
{
	int rc = SYNX_SUCCESS, idx;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_INVALID;

	/* print bitmap memory */
	for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			return rc;

		dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
			__func__, idx, synx_gmem.bitmap[idx]);

		synx_gmem_unlock(idx, &flags);
	}

	/* print table memory */
	for (idx = 0;
	     idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
	     idx++) {
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			return rc;

		dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);

		synx_g_obj = &synx_gmem.table[idx];
		synx_global_print_data(synx_g_obj, __func__);

		synx_gmem_unlock(idx, &flags);
	}
	return rc;
}
static int synx_gmem_init(void)
{
	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
	if (!synx_hwlock) {
		dprintk(SYNX_ERR, "hwspinlock request failed\n");
		return -SYNX_NOMEM;
	}

	/* zero idx not allocated for clients */
	ipclite_global_test_and_set_bit(0,
		(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
	memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));

	return SYNX_SUCCESS;
}
u32 synx_global_map_core_id(enum synx_core_id id)
{
	u32 host_id;

	switch (id) {
	case SYNX_CORE_APSS:
		host_id = IPCMEM_APPS;
		break;
	case SYNX_CORE_NSP:
		host_id = IPCMEM_CDSP;
		break;
	case SYNX_CORE_IRIS:
		host_id = IPCMEM_VPU;
		break;
	case SYNX_CORE_EVA:
		host_id = IPCMEM_CVP;
		break;
	case SYNX_CORE_ICP:
		host_id = IPCMEM_CAM;
		break;
	default:
		host_id = IPCMEM_NUM_HOSTS;
		dprintk(SYNX_ERR, "invalid core id\n");
	}
	return host_id;
}
int synx_global_alloc_index(u32 *idx)
{
	int rc = SYNX_SUCCESS;
	u32 prev, index;
	const u32 size = SYNX_GLOBAL_MAX_OBJS;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(idx))
		return -SYNX_INVALID;

	do {
		index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
		if (index >= size) {
			rc = -SYNX_NOMEM;
			break;
		}
		prev = ipclite_global_test_and_set_bit(index % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index / 32));
		if ((prev & (1UL << (index % 32))) == 0) {
			*idx = index;
			dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
			break;
		}
		/* another core raced us to the same bit; rescan */
	} while (true);

	return rc;
}
int synx_global_init_coredata(u32 h_synx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
		synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
		synx_g_obj->parents[0] != 0) {
		dprintk(SYNX_ERR,
			"entry not cleared for idx %u,\n"
			"synx_g_obj->status %d,\n"
			"synx_g_obj->refcount %d,\n"
			"synx_g_obj->subscribers %d,\n"
			"synx_g_obj->handle %u,\n"
			"synx_g_obj->parents[0] %d\n",
			idx, synx_g_obj->status,
			synx_g_obj->refcount,
			synx_g_obj->subscribers,
			synx_g_obj->handle,
			synx_g_obj->parents[0]);
		synx_gmem_unlock(idx, &flags);
		return -SYNX_INVALID;
	}
	memset(synx_g_obj, 0, sizeof(*synx_g_obj));
	/* set status to active */
	synx_g_obj->status = SYNX_STATE_ACTIVE;
	synx_g_obj->refcount = 1;
	synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
	synx_g_obj->handle = h_synx;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
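
/*
 * Illustrative setup sequence (a sketch; the real handle encoding lives
 * in the core driver, so the OR below is an assumption): reserve a table
 * slot, then initialize its coredata through a handle whose low
 * SYNX_HANDLE_INDEX_MASK bits carry the index.
 */
static int synx_example_setup(u32 h_synx_base, u32 *out_idx)
{
	int rc;

	rc = synx_global_alloc_index(out_idx);
	if (rc)
		return rc;

	/* assumed: the index occupies the handle's low bits */
	return synx_global_init_coredata(h_synx_base | *out_idx);
}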
static int synx_global_get_waiting_cores_locked(
	struct synx_global_coredata *synx_g_obj,
	bool *cores)
{
	int i;

	synx_global_print_data(synx_g_obj, __func__);
	for (i = 0; i < SYNX_CORE_MAX; i++) {
		if (synx_g_obj->waiters & (1UL << i)) {
			cores[i] = true;
			dprintk(SYNX_VERB,
				"waiting for handle %u\n",
				synx_g_obj->handle);
		}
	}

	/* clear waiter list so signals are not repeated */
	synx_g_obj->waiters = 0;

	return SYNX_SUCCESS;
}
int synx_global_get_waiting_cores(u32 idx, bool *cores)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_get_waiting_cores_locked(synx_g_obj, cores);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->waiters |= (1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
int synx_global_get_subscribed_cores(u32 idx, bool *cores)
{
	int i;
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	for (i = 0; i < SYNX_CORE_MAX; i++)
		if (synx_g_obj->subscribers & (1UL << i))
			cores[i] = true;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
int synx_global_fetch_handle_details(u32 idx, u32 *h_synx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	*h_synx = synx_g_obj->handle;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->subscribers |= (1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->subscribers &= ~(1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}
u32 synx_global_get_parents_num(u32 idx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 i, count = 0;

	if (!synx_gmem.table)
		return 0;

	if (!synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
		if (synx_g_obj->parents[i] != 0)
			count++;
	}
	synx_gmem_unlock(idx, &flags);

	return count;
}
static int synx_global_get_parents_locked(
	struct synx_global_coredata *synx_g_obj, u32 *parents)
{
	u32 i;

	if (!synx_g_obj || !parents)
		return -SYNX_NOMEM;

	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
		parents[i] = synx_g_obj->parents[i];

	return SYNX_SUCCESS;
}
int synx_global_get_parents(u32 idx, u32 *parents)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table || !parents)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	rc = synx_global_get_parents_locked(synx_g_obj, parents);
	synx_gmem_unlock(idx, &flags);

	return rc;
}
u32 synx_global_get_status(u32 idx)
{
	int rc;
	unsigned long flags;
	u32 status = SYNX_STATE_ACTIVE;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return 0;

	if (!synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0)
		status = synx_g_obj->status;
	synx_gmem_unlock(idx, &flags);

	return status;
}
u32 synx_global_test_status_set_wait(u32 idx,
	enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	u32 status;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return 0;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return 0;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	status = synx_g_obj->status;
	/* if handle is still ACTIVE */
	if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) {
		synx_g_obj->waiters |= (1UL << id);
		status = SYNX_STATE_ACTIVE;
	} else {
		dprintk(SYNX_DBG, "handle %u already signaled %u",
			synx_g_obj->handle, synx_g_obj->status);
	}
	synx_gmem_unlock(idx, &flags);

	return status;
}
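
/*
 * Illustrative waiter flow (a sketch, not from the original source):
 * register interest and test the status atomically, then block only
 * while the handle is still ACTIVE. synx_example_block() is a
 * hypothetical stand-in for whatever wait primitive the core driver
 * actually uses once the ipclite callback fires.
 */
static u32 synx_example_block(u32 idx); /* hypothetical, not defined here */

static u32 synx_example_wait(u32 idx)
{
	u32 status;

	status = synx_global_test_status_set_wait(idx, SYNX_CORE_APSS);
	if (status == SYNX_STATE_ACTIVE)
		status = synx_example_block(idx);

	return status;
}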
static int synx_global_update_status_core(u32 idx,
	u32 status)
{
	u32 i, p_idx;
	int rc;
	bool clear = false;
	unsigned long flags;
	uint64_t data;
	struct synx_global_coredata *synx_g_obj;
	u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
	bool wait_cores[SYNX_CORE_MAX] = {false};

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	/* prepare for cross core signaling */
	data = synx_g_obj->handle;
	data <<= 32;
	if (synx_g_obj->num_child != 0) {
		/* composite handle */
		synx_g_obj->num_child--;
		if (synx_g_obj->status == SYNX_STATE_ACTIVE ||
			(status > SYNX_STATE_SIGNALED_SUCCESS &&
			status <= SYNX_STATE_SIGNALED_MAX))
			synx_g_obj->status = status;

		if (synx_g_obj->num_child == 0) {
			data |= synx_g_obj->status;
			synx_global_get_waiting_cores_locked(synx_g_obj,
				wait_cores);
			synx_global_get_parents_locked(synx_g_obj, h_parents);

			/* release ref held by constituting handles */
			synx_g_obj->refcount--;
			if (synx_g_obj->refcount == 0) {
				memset(synx_g_obj, 0,
					sizeof(*synx_g_obj));
				clear = true;
			}
		} else {
			/* pending notification from handles */
			data = 0;
			dprintk(SYNX_DBG,
				"Child notified parent handle %u, pending %u\n",
				synx_g_obj->handle, synx_g_obj->num_child);
		}
	} else {
		synx_g_obj->status = status;
		data |= synx_g_obj->status;
		synx_global_get_waiting_cores_locked(synx_g_obj,
			wait_cores);
		synx_global_get_parents_locked(synx_g_obj, h_parents);
	}
	synx_gmem_unlock(idx, &flags);

	if (clear) {
		ipclite_global_test_and_clear_bit(idx % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
		dprintk(SYNX_MEM,
			"cleared global idx %u\n", idx);
	}

	/* notify waiting clients on signal */
	if (data) {
		/*
		 * In case of SSR, someone might be waiting on the same
		 * core. In all other cases the synx_signal API takes care
		 * of signaling handles on the same core, so no interrupt
		 * needs to be sent to it.
		 */
		if (status == SYNX_STATE_SIGNALED_SSR)
			i = 0;
		else
			i = 1;

		for (; i < SYNX_CORE_MAX; i++) {
			if (!wait_cores[i])
				continue;
			dprintk(SYNX_DBG,
				"invoking ipc signal handle %u, status %u\n",
				synx_g_obj->handle, synx_g_obj->status);
			if (ipclite_msg_send(
				synx_global_map_core_id(i),
				data))
				dprintk(SYNX_ERR,
					"ipc signaling %llu to core %u failed\n",
					data, i);
		}
	}

	/* handle parent notifications */
	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
		p_idx = h_parents[i];
		if (p_idx == 0)
			continue;
		synx_global_update_status_core(p_idx, status);
	}

	return SYNX_SUCCESS;
}
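
/*
 * Illustrative decode of the cross-core payload (a sketch, shown only
 * for documentation): the signaling path above packs the handle into
 * the upper 32 bits and the final status into the lower 32 bits of the
 * ipclite message.
 */
static void synx_example_decode_msg(uint64_t data, u32 *handle, u32 *status)
{
	*handle = (u32)(data >> 32);
	*status = (u32)(data & 0xFFFFFFFF);
}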
int synx_global_update_status(u32 idx, u32 status)
{
	int rc = -SYNX_INVALID;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	if (synx_g_obj->num_child != 0) {
		/* composite handle cannot be signaled directly */
		rc = -SYNX_INVALID;
		goto fail;
	} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
		rc = -SYNX_ALREADY;
		goto fail;
	}
	synx_gmem_unlock(idx, &flags);

	return synx_global_update_status_core(idx, status);

fail:
	synx_gmem_unlock(idx, &flags);
	return rc;
}
int synx_global_get_ref(u32 idx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	if (synx_g_obj->handle && synx_g_obj->refcount)
		synx_g_obj->refcount++;
	else
		rc = -SYNX_NOENT;
	synx_gmem_unlock(idx, &flags);

	return rc;
}
void synx_global_put_ref(u32 idx)
{
	int rc;
	bool clear = false;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return;

	if (!synx_is_valid_idx(idx))
		return;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->refcount--;
	if (synx_g_obj->refcount == 0) {
		memset(synx_g_obj, 0, sizeof(*synx_g_obj));
		clear = true;
	}
	synx_gmem_unlock(idx, &flags);

	if (clear) {
		ipclite_global_test_and_clear_bit(idx % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
		dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
	}
}
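
/*
 * Illustrative reference lifecycle (hypothetical caller, shown only to
 * document the pairing): a core using an entry holds a reference across
 * the access; the final put clears the slot and releases its bitmap bit.
 */
static void synx_example_use_entry(u32 idx)
{
	if (synx_global_get_ref(idx))
		return;

	/* ... safely use synx_gmem.table[idx] via the helpers above ... */

	synx_global_put_ref(idx);
}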
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
{
	int rc = -SYNX_INVALID;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 i, j = 0;
	u32 idx;
	u32 num_child = 0;
	u32 parent_status = SYNX_STATE_ACTIVE;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(p_idx))
		return -SYNX_INVALID;

	if (num_list == 0)
		return SYNX_SUCCESS;

	while (j < num_list) {
		idx = idx_list[j];

		if (!synx_is_valid_idx(idx))
			goto fail;

		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			goto fail;

		synx_g_obj = &synx_gmem.table[idx];
		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
			if (synx_g_obj->parents[i] == 0) {
				synx_g_obj->parents[i] = p_idx;
				break;
			}
		}
		if (synx_g_obj->status == SYNX_STATE_ACTIVE)
			num_child++;
		else if (synx_g_obj->status >
			SYNX_STATE_SIGNALED_SUCCESS &&
			synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX)
			parent_status = synx_g_obj->status;
		else if (parent_status == SYNX_STATE_ACTIVE)
			parent_status = synx_g_obj->status;

		/*
		 * a signaled composite child that still has pending child
		 * notifications also counts as pending for the new parent
		 */
		if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0)
			num_child++;

		dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n",
			synx_g_obj->status, parent_status);
		synx_gmem_unlock(idx, &flags);

		if (i >= SYNX_GLOBAL_MAX_PARENTS) {
			rc = -SYNX_NOMEM;
			goto fail;
		}

		j++;
	}

	rc = synx_gmem_lock(p_idx, &flags);
	if (rc)
		goto fail;
	synx_g_obj = &synx_gmem.table[p_idx];
	synx_g_obj->num_child += num_child;
	if (synx_g_obj->num_child != 0)
		synx_g_obj->refcount++;
	synx_g_obj->status = parent_status;
	synx_global_print_data(synx_g_obj, __func__);
	synx_gmem_unlock(p_idx, &flags);

	return SYNX_SUCCESS;

fail:
	while (num_child--) {
		idx = idx_list[num_child];

		if (synx_gmem_lock(idx, &flags))
			continue;
		synx_g_obj = &synx_gmem.table[idx];
		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
			if (synx_g_obj->parents[i] == p_idx) {
				synx_g_obj->parents[i] = 0;
				break;
			}
		}
		synx_gmem_unlock(idx, &flags);
	}
	return rc;
}
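
/*
 * Recovery overview (illustrative summary of the function below, not
 * from the original sources): on subsystem restart of @core_id, every
 * entry subscribed by that core drops one reference; slots that reach
 * refcount 0 are cleared and their bitmap bits released, while entries
 * still ACTIVE are force-signaled with SYNX_STATE_SIGNALED_SSR so that
 * surviving waiters on other cores are released.
 */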
int synx_global_recover(enum synx_core_id core_id)
{
	int rc = SYNX_SUCCESS;
	u32 idx = 0;
	const u32 size = SYNX_GLOBAL_MAX_OBJS;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	bool update;
	int *clear_idx = NULL;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	clear_idx = kzalloc(sizeof(int) * SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL);
	if (!clear_idx)
		return -SYNX_NOMEM;

	ipclite_recover(synx_global_map_core_id(core_id));

	/* recover synx gmem lock if it was owned by core in ssr */
	if (synx_gmem_lock_owner(0) == core_id) {
		synx_gmem_lock_owner_clear(0);
		hwspin_unlock_raw(synx_hwlock);
	}

	idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
		size, idx + 1);
	while (idx < size) {
		update = false;
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			goto free;
		synx_g_obj = &synx_gmem.table[idx];
		if (synx_g_obj->refcount &&
			synx_g_obj->subscribers & (1UL << core_id)) {
			synx_g_obj->subscribers &= ~(1UL << core_id);
			synx_g_obj->refcount--;
			if (synx_g_obj->refcount == 0) {
				memset(synx_g_obj, 0, sizeof(*synx_g_obj));
				clear_idx[idx] = 1;
			} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
				update = true;
			}
		}
		synx_gmem_unlock(idx, &flags);
		if (update)
			synx_global_update_status(idx,
				SYNX_STATE_SIGNALED_SSR);
		idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
			size, idx + 1);
	}

	for (idx = 1; idx < size; idx++) {
		if (clear_idx[idx]) {
			ipclite_global_test_and_clear_bit(idx % 32,
				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
			dprintk(SYNX_MEM, "released global idx %u\n", idx);
		}
	}

free:
	kfree(clear_idx);
	return rc;
}
int synx_global_mem_init(void)
{
	int rc;
	int bitmap_size = SYNX_GLOBAL_MAX_OBJS / 32;
	struct global_region_info mem_info;

	rc = get_global_partition_info(&mem_info);
	if (rc) {
		dprintk(SYNX_ERR, "error setting up global shared memory\n");
		return rc;
	}

	memset(mem_info.virt_base, 0, mem_info.size);
	dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
		mem_info.virt_base, mem_info.size);

	synx_gmem.bitmap = (u32 *)mem_info.virt_base;
	synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
	synx_gmem.table =
		(struct synx_global_coredata *)(synx_gmem.locks + 2);
	dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
		synx_gmem.bitmap, synx_gmem.table);

	return synx_gmem_init();
}
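
/*
 * Illustrative layout of the shared region as carved up above (a sketch;
 * the table length is presumed to be SYNX_GLOBAL_MAX_OBJS entries, which
 * this file does not state explicitly):
 *
 *   base: u32 bitmap[SYNX_GLOBAL_MAX_OBJS / 32]
 *         u32 locks[2]
 *         struct synx_global_coredata table[SYNX_GLOBAL_MAX_OBJS]
 */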