synx_global.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/hwspinlock.h>
#include <linux/string.h>

#include "synx_debugfs.h"
#include "synx_global.h"

static struct synx_shared_mem synx_gmem;
static struct hwspinlock *synx_hwlock;

static u32 synx_gmem_lock_owner(u32 idx)
{
	/*
	 * The subscribers field of global table entry 0 is reserved to
	 * track the current synx gmem lock owner. Each core writes the
	 * field right after acquiring the lock and clears it just
	 * before releasing the lock.
	 */
	return synx_gmem.table[0].subscribers;
}

static void synx_gmem_lock_owner_set(u32 idx)
{
	synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
}

static void synx_gmem_lock_owner_clear(u32 idx)
{
	if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
		dprintk(SYNX_WARN, "reset lock owned by core %u\n",
			synx_gmem.table[0].subscribers);

	synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
}

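/*
 * Every accessor below follows the same locking pattern (a sketch of
 * the assumed usage, not additional code):
 *
 *	unsigned long flags;
 *
 *	rc = synx_gmem_lock(idx, &flags);
 *	if (rc)
 *		return rc;
 *	... read/modify synx_gmem.table[idx] ...
 *	synx_gmem_unlock(idx, &flags);
 *
 * The idx argument is informational only; a single system-wide
 * hwspinlock serializes access to the whole table across cores.
 */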
static int synx_gmem_lock(u32 idx, unsigned long *flags)
{
	int rc;

	if (!synx_hwlock)
		return -SYNX_INVALID;

	rc = hwspin_lock_timeout_irqsave(
		synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
	if (!rc)
		synx_gmem_lock_owner_set(idx);

	return rc;
}

static void synx_gmem_unlock(u32 idx, unsigned long *flags)
{
	synx_gmem_lock_owner_clear(idx);
	hwspin_unlock_irqrestore(synx_hwlock, flags);
}

static void synx_global_print_data(
	struct synx_global_coredata *synx_g_obj,
	const char *func)
{
	int i = 0;

	dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
		func, synx_g_obj->status,
		synx_g_obj->handle, synx_g_obj->refcount);

	dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
		func, synx_g_obj->subscribers, synx_g_obj->waiters,
		synx_g_obj->num_child);

	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
		if (synx_g_obj->parents[i])
			dprintk(SYNX_VERB, "%s: parents %u:%u",
				func, i, synx_g_obj->parents[i]);
}

int synx_global_dump_shared_memory(void)
{
	int rc = SYNX_SUCCESS, idx;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_INVALID;

	/* print bitmap memory */
	for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			return rc;

		dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
			__func__, idx, synx_gmem.bitmap[idx]);

		synx_gmem_unlock(idx, &flags);
	}

	/* print table memory */
	for (idx = 0;
		idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
		idx++) {
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			return rc;

		dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);

		synx_g_obj = &synx_gmem.table[idx];
		synx_global_print_data(synx_g_obj, __func__);

		synx_gmem_unlock(idx, &flags);
	}
	return rc;
}

static int synx_gmem_init(void)
{
	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
	if (!synx_hwlock) {
		dprintk(SYNX_ERR, "hwspinlock request failed\n");
		return -SYNX_NOMEM;
	}

	/* zero idx not allocated for clients */
	ipclite_global_test_and_set_bit(0,
		(ipclite_atomic_uint32_t *)synx_gmem.bitmap);
	memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));

	return SYNX_SUCCESS;
}

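/*
 * Map a synx core id to the corresponding ipclite host id.
 * IPCMEM_NUM_HOSTS is returned as an out-of-range sentinel for
 * unknown cores.
 */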
u32 synx_global_map_core_id(enum synx_core_id id)
{
	u32 host_id;

	switch (id) {
	case SYNX_CORE_APSS:
		host_id = IPCMEM_APPS;
		break;
	case SYNX_CORE_NSP:
		host_id = IPCMEM_CDSP;
		break;
	case SYNX_CORE_IRIS:
		host_id = IPCMEM_VPU;
		break;
	case SYNX_CORE_EVA:
		host_id = IPCMEM_CVP;
		break;
	case SYNX_CORE_ICP:
		host_id = IPCMEM_CAM;
		break;
	default:
		host_id = IPCMEM_NUM_HOSTS;
		dprintk(SYNX_ERR, "invalid core id\n");
	}

	return host_id;
}

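/*
 * Allocate a free slot in the global table. find_first_zero_bit() is
 * not atomic across cores, so the chosen bit is confirmed with an
 * atomic test-and-set: if another core raced us to the same bit, the
 * already-set bit shows up in 'prev' and the search simply restarts.
 */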
int synx_global_alloc_index(u32 *idx)
{
	int rc = SYNX_SUCCESS;
	u32 prev, index;
	const u32 size = SYNX_GLOBAL_MAX_OBJS;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(idx))
		return -SYNX_INVALID;

	do {
		index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
		if (index >= size) {
			rc = -SYNX_NOMEM;
			break;
		}
		prev = ipclite_global_test_and_set_bit(index % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index / 32));
		if ((prev & (1UL << (index % 32))) == 0) {
			*idx = index;
			dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
			break;
		}
		/* lost the race for this bit; search again */
	} while (true);

	return rc;
}

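/*
 * Initialize the global entry backing handle h_synx. The entry is
 * expected to be zeroed (never used, or fully released); a non-clean
 * entry indicates a stale reference and is rejected.
 */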
int synx_global_init_coredata(u32 h_synx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
		synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
		synx_g_obj->parents[0] != 0) {
		dprintk(SYNX_ERR,
			"entry not cleared for idx %u,\n"
			"synx_g_obj->status %u,\n"
			"synx_g_obj->refcount %u,\n"
			"synx_g_obj->subscribers %u,\n"
			"synx_g_obj->handle %u,\n"
			"synx_g_obj->parents[0] %u\n",
			idx, synx_g_obj->status,
			synx_g_obj->refcount,
			synx_g_obj->subscribers,
			synx_g_obj->handle,
			synx_g_obj->parents[0]);
		synx_gmem_unlock(idx, &flags);
		return -SYNX_INVALID;
	}
	memset(synx_g_obj, 0, sizeof(*synx_g_obj));
	/* set status to active */
	synx_g_obj->status = SYNX_STATE_ACTIVE;
	synx_g_obj->refcount = 1;
	synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
	synx_g_obj->handle = h_synx;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

static int synx_global_get_waiting_cores_locked(
	struct synx_global_coredata *synx_g_obj,
	bool *cores)
{
	int i;

	synx_global_print_data(synx_g_obj, __func__);
	for (i = 0; i < SYNX_CORE_MAX; i++) {
		if (synx_g_obj->waiters & (1UL << i)) {
			cores[i] = true;
			dprintk(SYNX_VERB,
				"waiting for handle %u\n",
				synx_g_obj->handle);
		}
	}

	/* clear waiter list so signals are not repeated */
	synx_g_obj->waiters = 0;

	return SYNX_SUCCESS;
}

int synx_global_get_waiting_cores(u32 idx, bool *cores)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_get_waiting_cores_locked(synx_g_obj, cores);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->waiters |= (1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

int synx_global_get_subscribed_cores(u32 idx, bool *cores)
{
	int i;
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	for (i = 0; i < SYNX_CORE_MAX; i++)
		if (synx_g_obj->subscribers & (1UL << i))
			cores[i] = true;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

int synx_global_fetch_handle_details(u32 idx, u32 *h_synx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	*h_synx = synx_g_obj->handle;
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->subscribers |= (1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->subscribers &= ~(1UL << id);
	synx_gmem_unlock(idx, &flags);

	return SYNX_SUCCESS;
}

u32 synx_global_get_parents_num(u32 idx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 i, count = 0;

	if (!synx_gmem.table)
		return 0;

	if (!synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return 0; /* returns a count, not an error code */
	synx_g_obj = &synx_gmem.table[idx];
	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
		if (synx_g_obj->parents[i] != 0)
			count++;
	}
	synx_gmem_unlock(idx, &flags);

	return count;
}

static int synx_global_get_parents_locked(
	struct synx_global_coredata *synx_g_obj, u32 *parents)
{
	u32 i;

	if (!synx_g_obj || !parents)
		return -SYNX_NOMEM;

	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
		parents[i] = synx_g_obj->parents[i];

	return SYNX_SUCCESS;
}

int synx_global_get_parents(u32 idx, u32 *parents)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table || !parents)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	rc = synx_global_get_parents_locked(synx_g_obj, parents);
	synx_gmem_unlock(idx, &flags);

	return rc;
}

u32 synx_global_get_status(u32 idx)
{
	int rc;
	unsigned long flags;
	u32 status;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return 0;

	if (!synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return 0; /* returns a status, not an error code */
	synx_g_obj = &synx_gmem.table[idx];
	status = synx_g_obj->status;
	synx_gmem_unlock(idx, &flags);

	return status;
}

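/*
 * Atomically test the handle status and, only if it is still ACTIVE,
 * register 'id' as a waiter, all under the same gmem lock. Doing both
 * steps in one critical section closes the window where a signal could
 * land between a separate status check and the waiter registration.
 */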
u32 synx_global_test_status_set_wait(u32 idx,
	enum synx_core_id id)
{
	int rc;
	unsigned long flags;
	u32 status;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return 0;

	if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
		return 0;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return 0;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	status = synx_g_obj->status;
	/* if handle is still ACTIVE */
	if (status == SYNX_STATE_ACTIVE)
		synx_g_obj->waiters |= (1UL << id);
	else
		dprintk(SYNX_DBG, "handle %u already signaled %u",
			synx_g_obj->handle, synx_g_obj->status);
	synx_gmem_unlock(idx, &flags);

	return status;
}

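/*
 * Core of the signaling path. A plain handle records the new status
 * directly; a composite (merged) handle decrements its pending child
 * count and resolves once it reaches zero, or immediately on a child
 * error. Waiting cores are then notified over ipclite, and the update
 * recurses into any parent handles. The recursion depth is bounded by
 * the merge topology, which is assumed to stay shallow.
 */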
static int synx_global_update_status_core(u32 idx,
	u32 status)
{
	u32 i, p_idx;
	int rc;
	bool clear = false;
	unsigned long flags;
	uint64_t data;
	struct synx_global_coredata *synx_g_obj;
	u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
	bool wait_cores[SYNX_CORE_MAX] = {false};

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	/* prepare for cross core signaling */
	data = synx_g_obj->handle;
	data <<= 32;
	if (synx_g_obj->num_child != 0) {
		/* composite handle */
		synx_g_obj->num_child--;
		if (synx_g_obj->num_child == 0) {
			if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
				synx_g_obj->status =
					(status == SYNX_STATE_SIGNALED_SUCCESS) ?
					SYNX_STATE_SIGNALED_SUCCESS : SYNX_STATE_SIGNALED_ERROR;
				data |= synx_g_obj->status;
				synx_global_get_waiting_cores_locked(synx_g_obj,
					wait_cores);
				synx_global_get_parents_locked(synx_g_obj, h_parents);
			} else {
				data = 0;
				dprintk(SYNX_WARN,
					"merged handle %u already in state %u\n",
					synx_g_obj->handle, synx_g_obj->status);
			}
			/* release ref held by constituting handles */
			synx_g_obj->refcount--;
			if (synx_g_obj->refcount == 0) {
				memset(synx_g_obj, 0,
					sizeof(*synx_g_obj));
				clear = true;
			}
		} else if (status != SYNX_STATE_SIGNALED_SUCCESS) {
			/* a child failed; resolve the composite right away */
			synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
			data |= synx_g_obj->status;
			synx_global_get_waiting_cores_locked(synx_g_obj,
				wait_cores);
			synx_global_get_parents_locked(synx_g_obj, h_parents);
			dprintk(SYNX_WARN,
				"merged handle %u signaled with error state\n",
				synx_g_obj->handle);
		} else {
			/* still pending notifications from other child handles */
			data = 0;
			dprintk(SYNX_DBG,
				"Child notified parent handle %u, pending %u\n",
				synx_g_obj->handle, synx_g_obj->num_child);
		}
	} else {
		synx_g_obj->status = status;
		data |= synx_g_obj->status;
		synx_global_get_waiting_cores_locked(synx_g_obj,
			wait_cores);
		synx_global_get_parents_locked(synx_g_obj, h_parents);
	}
	synx_gmem_unlock(idx, &flags);

	if (clear) {
		ipclite_global_test_and_clear_bit(idx % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
		dprintk(SYNX_MEM,
			"cleared global idx %u\n", idx);
	}

	/* notify waiting clients on signal */
	if (data) {
		/*
		 * In case of SSR, someone might be waiting on the same
		 * core. In all other cases the synx_signal API takes care
		 * of signaling handles on the local core, so no interrupt
		 * needs to be sent to it.
		 */
		if (status == SYNX_STATE_SIGNALED_SSR)
			i = 0;
		else
			i = 1;
		for (; i < SYNX_CORE_MAX; i++) {
			if (!wait_cores[i])
				continue;
			/*
			 * Log from the 'data' snapshot; the table entry may
			 * already have been cleared after the unlock above.
			 */
			dprintk(SYNX_DBG,
				"invoking ipc signal handle %u, status %u\n",
				(u32)(data >> 32), (u32)data);
			if (ipclite_msg_send(
				synx_global_map_core_id(i),
				data))
				dprintk(SYNX_ERR,
					"ipc signaling %llu to core %u failed\n",
					data, i);
		}
	}

	/* handle parent notifications */
	for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
		p_idx = h_parents[i];
		if (p_idx == 0)
			continue;
		synx_global_update_status_core(p_idx, status);
	}

	return SYNX_SUCCESS;
}

int synx_global_update_status(u32 idx, u32 status)
{
	int rc = -SYNX_INVALID;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	if (synx_g_obj->num_child != 0) {
		/* composite handle cannot be signaled */
		goto fail;
	} else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
		rc = -SYNX_ALREADY;
		goto fail;
	}
	synx_gmem_unlock(idx, &flags);

	return synx_global_update_status_core(idx, status);

fail:
	synx_gmem_unlock(idx, &flags);
	return rc;
}

int synx_global_get_ref(u32 idx)
{
	int rc;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(idx))
		return -SYNX_INVALID;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return rc;
	synx_g_obj = &synx_gmem.table[idx];
	synx_global_print_data(synx_g_obj, __func__);
	if (synx_g_obj->handle && synx_g_obj->refcount)
		synx_g_obj->refcount++;
	else
		rc = -SYNX_NOENT;
	synx_gmem_unlock(idx, &flags);

	return rc;
}

void synx_global_put_ref(u32 idx)
{
	int rc;
	bool clear = false;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;

	if (!synx_gmem.table)
		return;

	if (!synx_is_valid_idx(idx))
		return;

	rc = synx_gmem_lock(idx, &flags);
	if (rc)
		return;
	synx_g_obj = &synx_gmem.table[idx];
	synx_g_obj->refcount--;
	if (synx_g_obj->refcount == 0) {
		memset(synx_g_obj, 0, sizeof(*synx_g_obj));
		clear = true;
	}
	synx_gmem_unlock(idx, &flags);

	if (clear) {
		ipclite_global_test_and_clear_bit(idx % 32,
			(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
		dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
	}
}

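/*
 * Link the handles in idx_list as children of the composite handle at
 * p_idx. Active children get p_idx recorded in a free parents[] slot;
 * already-signaled children only contribute their (possibly error)
 * state. On any failure the parent links added so far are unwound.
 */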
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
{
	int rc = -SYNX_INVALID;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	u32 i, j = 0;
	u32 idx;
	bool sig_error = false;
	u32 num_child = 0;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	if (!synx_is_valid_idx(p_idx))
		return -SYNX_INVALID;

	if (num_list == 0)
		return SYNX_SUCCESS;

	while (j < num_list) {
		idx = idx_list[j];

		if (!synx_is_valid_idx(idx))
			goto fail;

		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			goto fail;

		/*
		 * Reset 'i' each iteration; the parent-slot overflow check
		 * below is only meaningful when a slot search actually ran.
		 */
		i = 0;
		synx_g_obj = &synx_gmem.table[idx];
		if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
			for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
				if (synx_g_obj->parents[i] == 0) {
					synx_g_obj->parents[i] = p_idx;
					break;
				}
			}
			num_child++;
		} else if (synx_g_obj->status >
			SYNX_STATE_SIGNALED_SUCCESS) {
			sig_error = true;
		}
		synx_gmem_unlock(idx, &flags);

		if (i >= SYNX_GLOBAL_MAX_PARENTS) {
			rc = -SYNX_NOMEM;
			goto fail;
		}
		j++;
	}

	rc = synx_gmem_lock(p_idx, &flags);
	if (rc)
		goto fail;
	synx_g_obj = &synx_gmem.table[p_idx];
	synx_g_obj->num_child += num_child;
	if (sig_error)
		synx_g_obj->status = SYNX_STATE_SIGNALED_ERROR;
	else if (synx_g_obj->num_child != 0)
		synx_g_obj->refcount++;
	else if (synx_g_obj->num_child == 0 &&
		synx_g_obj->status == SYNX_STATE_ACTIVE)
		synx_g_obj->status = SYNX_STATE_SIGNALED_SUCCESS;
	synx_global_print_data(synx_g_obj, __func__);
	synx_gmem_unlock(p_idx, &flags);

	return SYNX_SUCCESS;

fail:
	/* unwind every entry processed so far, not just the active ones */
	while (j--) {
		idx = idx_list[j];
		if (synx_gmem_lock(idx, &flags))
			continue;
		synx_g_obj = &synx_gmem.table[idx];
		for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
			if (synx_g_obj->parents[i] == p_idx) {
				synx_g_obj->parents[i] = 0;
				break;
			}
		}
		synx_gmem_unlock(idx, &flags);
	}
	return rc;
}

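/*
 * Clean up global state on behalf of a core that went through SSR
 * (subsystem restart): recover ipclite, force-release the gmem lock if
 * the dead core held it, then drop that core's subscription and
 * reference on every live entry, signaling still-active handles with
 * SYNX_STATE_SIGNALED_SSR so remaining waiters are not stuck forever.
 */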
int synx_global_recover(enum synx_core_id core_id)
{
	int rc = SYNX_SUCCESS;
	u32 idx = 0;
	const u32 size = SYNX_GLOBAL_MAX_OBJS;
	unsigned long flags;
	struct synx_global_coredata *synx_g_obj;
	bool update;
	int *clear_idx = NULL;

	if (!synx_gmem.table)
		return -SYNX_NOMEM;

	clear_idx = kzalloc(sizeof(int) * SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL);
	if (!clear_idx)
		return -SYNX_NOMEM;

	ipclite_recover(synx_global_map_core_id(core_id));

	/* recover synx gmem lock if it was owned by core in ssr */
	if (synx_gmem_lock_owner(0) == core_id) {
		synx_gmem_lock_owner_clear(0);
		hwspin_unlock_raw(synx_hwlock);
	}

	/* idx 0 is reserved, so the scan starts from bit 1 */
	idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
		size, idx + 1);
	while (idx < size) {
		update = false;
		rc = synx_gmem_lock(idx, &flags);
		if (rc)
			goto free;
		synx_g_obj = &synx_gmem.table[idx];
		if (synx_g_obj->refcount &&
			synx_g_obj->subscribers & (1UL << core_id)) {
			synx_g_obj->subscribers &= ~(1UL << core_id);
			synx_g_obj->refcount--;
			if (synx_g_obj->refcount == 0) {
				memset(synx_g_obj, 0, sizeof(*synx_g_obj));
				clear_idx[idx] = 1;
			} else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
				update = true;
			}
		}
		synx_gmem_unlock(idx, &flags);
		if (update)
			synx_global_update_status(idx,
				SYNX_STATE_SIGNALED_SSR);
		idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
			size, idx + 1);
	}

	/* release the recorded bitmap slots outside the gmem lock */
	for (idx = 1; idx < size; idx++) {
		if (clear_idx[idx]) {
			ipclite_global_test_and_clear_bit(idx % 32,
				(ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx / 32));
			dprintk(SYNX_MEM, "released global idx %u\n", idx);
		}
	}

free:
	kfree(clear_idx);
	return rc;
}

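/*
 * Carve the ipclite global partition into the synx sections. Assumed
 * layout, in u32 words from the partition base:
 *
 *	[0, bitmap_size)         allocation bitmap
 *	[bitmap_size, +2)        gmem lock words
 *	[bitmap_size + 2, ...)   coredata table
 */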
int synx_global_mem_init(void)
{
	int rc;
	int bitmap_size = SYNX_GLOBAL_MAX_OBJS / 32;
	struct global_region_info mem_info;

	rc = get_global_partition_info(&mem_info);
	if (rc) {
		dprintk(SYNX_ERR, "error setting up global shared memory\n");
		return rc;
	}

	memset(mem_info.virt_base, 0, mem_info.size);
	dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
		mem_info.virt_base, mem_info.size);

	synx_gmem.bitmap = (u32 *)mem_info.virt_base;
	synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
	synx_gmem.table =
		(struct synx_global_coredata *)(synx_gmem.locks + 2);
	dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
		synx_gmem.bitmap, synx_gmem.table);

	return synx_gmem_init();
}