synx_global.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/hwspinlock.h>
#include <linux/string.h>

#include "synx_debugfs.h"
#include "synx_global.h"
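
/*
 * Global shared memory state. synx_gmem points into an IPC memory region
 * shared across cores (allocation bitmap, lock words and the coredata
 * table; see synx_global_mem_init() at the end of this file), and
 * synx_hwlock is the hardware spinlock that serializes cross-core access
 * to that region.
 */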
static struct synx_shared_mem synx_gmem;
static struct hwspinlock *synx_hwlock;

static u32 synx_gmem_lock_owner(u32 idx)
{
        /*
         * The subscribers field of global table index 0 is reserved to
         * track the current owner of the synx gmem hwspinlock. Each core
         * sets the field right after acquiring the lock and clears it
         * just before releasing the lock.
         */
        return synx_gmem.table[0].subscribers;
}

static void synx_gmem_lock_owner_set(u32 idx)
{
        synx_gmem.table[0].subscribers = SYNX_CORE_APSS;
}

static void synx_gmem_lock_owner_clear(u32 idx)
{
        if (synx_gmem.table[0].subscribers != SYNX_CORE_APSS)
                dprintk(SYNX_WARN, "reset lock owned by core %u\n",
                        synx_gmem.table[0].subscribers);

        synx_gmem.table[0].subscribers = SYNX_CORE_MAX;
}
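
/*
 * Lock/unlock wrappers around the hardware spinlock. The owning core is
 * recorded on acquire and cleared on release so that a peer can detect,
 * and synx_global_recover() can break, a lock held by a crashed core.
 * Callers follow the usual lock/access/unlock pattern, e.g.:
 *
 *        unsigned long flags;
 *
 *        if (synx_gmem_lock(idx, &flags))
 *                return -SYNX_INVALID;
 *        synx_g_obj = &synx_gmem.table[idx];
 *        ...
 *        synx_gmem_unlock(idx, &flags);
 */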
static int synx_gmem_lock(u32 idx, unsigned long *flags)
{
        int rc;

        if (!synx_hwlock)
                return -SYNX_INVALID;

        rc = hwspin_lock_timeout_irqsave(
                synx_hwlock, SYNX_HWSPIN_TIMEOUT, flags);
        if (!rc)
                synx_gmem_lock_owner_set(idx);

        return rc;
}

static void synx_gmem_unlock(u32 idx, unsigned long *flags)
{
        synx_gmem_lock_owner_clear(idx);
        hwspin_unlock_irqrestore(synx_hwlock, flags);
}

static void synx_global_print_data(
        struct synx_global_coredata *synx_g_obj,
        const char *func)
{
        int i = 0;

        dprintk(SYNX_VERB, "%s: status %u, handle %u, refcount %u",
                func, synx_g_obj->status,
                synx_g_obj->handle, synx_g_obj->refcount);

        dprintk(SYNX_VERB, "%s: subscribers %u, waiters %u, pending %u",
                func, synx_g_obj->subscribers, synx_g_obj->waiters,
                synx_g_obj->num_child);

        for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
                if (synx_g_obj->parents[i])
                        dprintk(SYNX_VERB, "%s: parents %u:%u",
                                func, i, synx_g_obj->parents[i]);
}

int synx_global_dump_shared_memory(void)
{
        int rc = SYNX_SUCCESS, idx;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_INVALID;

        /* print bitmap memory */
        for (idx = 0; idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS; idx++) {
                rc = synx_gmem_lock(idx, &flags);
                if (rc)
                        return rc;

                dprintk(SYNX_VERB, "%s: idx %d, bitmap value %d",
                        __func__, idx, synx_gmem.bitmap[idx]);

                synx_gmem_unlock(idx, &flags);
        }

        /* print table memory */
        for (idx = 0;
             idx < SHRD_MEM_DUMP_NUM_BMAP_WORDS * sizeof(u32) * NUM_CHAR_BIT;
             idx++) {
                rc = synx_gmem_lock(idx, &flags);
                if (rc)
                        return rc;

                dprintk(SYNX_VERB, "%s: idx %d\n", __func__, idx);

                synx_g_obj = &synx_gmem.table[idx];
                synx_global_print_data(synx_g_obj, __func__);

                synx_gmem_unlock(idx, &flags);
        }

        return rc;
}

static int synx_gmem_init(void)
{
        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        synx_hwlock = hwspin_lock_request_specific(SYNX_HWSPIN_ID);
        if (!synx_hwlock) {
                dprintk(SYNX_ERR, "hwspinlock request failed\n");
                return -SYNX_NOMEM;
        }

        /* zero idx not allocated for clients */
        ipclite_global_test_and_set_bit(0,
                (ipclite_atomic_uint32_t *)synx_gmem.bitmap);
        memset(&synx_gmem.table[0], 0, sizeof(struct synx_global_coredata));

        return SYNX_SUCCESS;
}
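
/*
 * Translate a synx core id into the ipclite host id used for cross-core
 * messaging. Unknown ids map to IPCMEM_NUM_HOSTS, which is not a valid
 * destination.
 */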
u32 synx_global_map_core_id(enum synx_core_id id)
{
        u32 host_id;

        switch (id) {
        case SYNX_CORE_APSS:
                host_id = IPCMEM_APPS; break;
        case SYNX_CORE_NSP:
                host_id = IPCMEM_CDSP; break;
        case SYNX_CORE_IRIS:
                host_id = IPCMEM_VPU; break;
        case SYNX_CORE_EVA:
                host_id = IPCMEM_CVP; break;
        case SYNX_CORE_ICP:
                host_id = IPCMEM_CAM; break;
        default:
                host_id = IPCMEM_NUM_HOSTS;
                dprintk(SYNX_ERR, "invalid core id\n");
        }

        return host_id;
}
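
/*
 * Allocate a free slot in the global table without holding the
 * hwspinlock: find_first_zero_bit() proposes a candidate and the ipclite
 * atomic test-and-set arbitrates between cores. If another core won the
 * race, the returned previous word value shows the bit already set and
 * the loop retries with a fresh scan.
 *
 * Illustrative pairing with synx_global_init_coredata() (a sketch, not
 * taken from this driver's callers; h_synx is assumed to be built by the
 * caller with idx in its SYNX_HANDLE_INDEX_MASK bits):
 *
 *        u32 idx;
 *
 *        rc = synx_global_alloc_index(&idx);
 *        if (rc == SYNX_SUCCESS)
 *                rc = synx_global_init_coredata(h_synx);
 */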
int synx_global_alloc_index(u32 *idx)
{
        int rc = SYNX_SUCCESS;
        u32 prev, index;
        const u32 size = SYNX_GLOBAL_MAX_OBJS;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (IS_ERR_OR_NULL(idx))
                return -SYNX_INVALID;

        do {
                index = find_first_zero_bit((unsigned long *)synx_gmem.bitmap, size);
                if (index >= size) {
                        rc = -SYNX_NOMEM;
                        break;
                }
                prev = ipclite_global_test_and_set_bit(index % 32,
                        (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + index/32));
                if ((prev & (1UL << (index % 32))) == 0) {
                        *idx = index;
                        dprintk(SYNX_MEM, "allocated global idx %u\n", *idx);
                        break;
                }
        } while (true);

        return rc;
}
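
/*
 * Initialize the global entry backing a newly created handle. The table
 * index is carried in the low bits of the handle (SYNX_HANDLE_INDEX_MASK),
 * and a stale entry that was not fully cleared is rejected rather than
 * reused.
 */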
int synx_global_init_coredata(u32 h_synx)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;
        u32 idx = h_synx & SYNX_HANDLE_INDEX_MASK;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (!synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        if (synx_g_obj->status != 0 || synx_g_obj->refcount != 0 ||
                synx_g_obj->subscribers != 0 || synx_g_obj->handle != 0 ||
                synx_g_obj->parents[0] != 0) {
                dprintk(SYNX_ERR,
                        "entry not cleared for idx %u,\n"
                        "synx_g_obj->status %d,\n"
                        "synx_g_obj->refcount %d,\n"
                        "synx_g_obj->subscribers %d,\n"
                        "synx_g_obj->handle %u,\n"
                        "synx_g_obj->parents[0] %d\n",
                        idx, synx_g_obj->status,
                        synx_g_obj->refcount,
                        synx_g_obj->subscribers,
                        synx_g_obj->handle,
                        synx_g_obj->parents[0]);
                synx_gmem_unlock(idx, &flags);
                return -SYNX_INVALID;
        }
        memset(synx_g_obj, 0, sizeof(*synx_g_obj));

        /* set status to active */
        synx_g_obj->status = SYNX_STATE_ACTIVE;
        synx_g_obj->refcount = 1;
        synx_g_obj->subscribers = (1UL << SYNX_CORE_APSS);
        synx_g_obj->handle = h_synx;

        synx_gmem_unlock(idx, &flags);
        return SYNX_SUCCESS;
}
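
/*
 * Snapshot and clear the waiter mask under the gmem lock. Clearing makes
 * waiter notification one-shot: each registered core is signaled at most
 * once per state transition.
 */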
static int synx_global_get_waiting_cores_locked(
        struct synx_global_coredata *synx_g_obj,
        bool *cores)
{
        int i;

        synx_global_print_data(synx_g_obj, __func__);
        for (i = 0; i < SYNX_CORE_MAX; i++) {
                if (synx_g_obj->waiters & (1UL << i)) {
                        cores[i] = true;
                        dprintk(SYNX_VERB,
                                "waiting for handle %u\n",
                                synx_g_obj->handle);
                }
        }

        /* clear waiter list so signals are not repeated */
        synx_g_obj->waiters = 0;

        return SYNX_SUCCESS;
}

int synx_global_get_waiting_cores(u32 idx, bool *cores)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_global_get_waiting_cores_locked(synx_g_obj, cores);
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}

int synx_global_set_waiting_core(u32 idx, enum synx_core_id id)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_g_obj->waiters |= (1UL << id);
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}
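
/*
 * The subscribers mask records, one bit per synx_core_id, which cores
 * have subscribed to the handle. synx_global_set_subscribed_core() and
 * synx_global_clear_subscribed_core() below update it under the gmem
 * lock; synx_global_recover() drops a reference when it clears a crashed
 * core's subscription.
 */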
int synx_global_get_subscribed_cores(u32 idx, bool *cores)
{
        int i;
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (IS_ERR_OR_NULL(cores) || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        for (i = 0; i < SYNX_CORE_MAX; i++)
                if (synx_g_obj->subscribers & (1UL << i))
                        cores[i] = true;
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}

int synx_global_fetch_handle_details(u32 idx, u32 *h_synx)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (IS_ERR_OR_NULL(h_synx) || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        *h_synx = synx_g_obj->handle;
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}

int synx_global_set_subscribed_core(u32 idx, enum synx_core_id id)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_g_obj->subscribers |= (1UL << id);
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}

int synx_global_clear_subscribed_core(u32 idx, enum synx_core_id id)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_g_obj->subscribers &= ~(1UL << id);
        synx_gmem_unlock(idx, &flags);

        return SYNX_SUCCESS;
}

u32 synx_global_get_parents_num(u32 idx)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;
        u32 i, count = 0;

        if (!synx_gmem.table)
                return 0;

        if (!synx_is_valid_idx(idx))
                return 0;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
                if (synx_g_obj->parents[i] != 0)
                        count++;
        }
        synx_gmem_unlock(idx, &flags);

        return count;
}

static int synx_global_get_parents_locked(
        struct synx_global_coredata *synx_g_obj, u32 *parents)
{
        u32 i;

        if (!synx_g_obj || !parents)
                return -SYNX_NOMEM;

        for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++)
                parents[i] = synx_g_obj->parents[i];

        return SYNX_SUCCESS;
}

int synx_global_get_parents(u32 idx, u32 *parents)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table || !parents)
                return -SYNX_NOMEM;

        if (!synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        rc = synx_global_get_parents_locked(synx_g_obj, parents);
        synx_gmem_unlock(idx, &flags);

        return rc;
}
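
/*
 * A composite (merged) handle only reports its final status once all
 * constituent handles have signaled, i.e. once num_child has dropped to
 * zero; until then it reads as SYNX_STATE_ACTIVE.
 */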
u32 synx_global_get_status(u32 idx)
{
        int rc;
        unsigned long flags;
        u32 status = SYNX_STATE_ACTIVE;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return 0;

        if (!synx_is_valid_idx(idx))
                return 0;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child == 0)
                status = synx_g_obj->status;
        synx_gmem_unlock(idx, &flags);

        return status;
}
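
/*
 * Atomically test the handle status and, if it is still active, register
 * the calling core in the waiter mask. Doing both under one gmem lock
 * section closes the check-then-wait race: the caller either observes
 * the final status or is guaranteed a later signal from the signaling
 * core.
 */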
u32 synx_global_test_status_set_wait(u32 idx,
        enum synx_core_id id)
{
        int rc;
        unsigned long flags;
        u32 status;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return 0;

        if (id >= SYNX_CORE_MAX || !synx_is_valid_idx(idx))
                return 0;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return 0;
        synx_g_obj = &synx_gmem.table[idx];
        synx_global_print_data(synx_g_obj, __func__);
        status = synx_g_obj->status;
        /* if handle is still ACTIVE */
        if (status == SYNX_STATE_ACTIVE || synx_g_obj->num_child != 0) {
                synx_g_obj->waiters |= (1UL << id);
                status = SYNX_STATE_ACTIVE;
        } else {
                dprintk(SYNX_DBG, "handle %u already signaled %u",
                        synx_g_obj->handle, synx_g_obj->status);
        }
        synx_gmem_unlock(idx, &flags);

        return status;
}
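
/*
 * Core of the signaling path. Under the gmem lock, the new status is
 * folded into the entry (decrementing num_child for composites) and the
 * waiter and parent lists are snapshotted. After the lock is dropped,
 * the handle and final status are packed into a 64-bit ipclite message
 * (handle in the upper 32 bits, status in the lower 32) and sent to each
 * waiting core, and the update is propagated recursively to parents.
 */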
static int synx_global_update_status_core(u32 idx,
        u32 status)
{
        u32 i, p_idx;
        int rc;
        bool clear = false;
        unsigned long flags;
        uint64_t data;
        struct synx_global_coredata *synx_g_obj;
        u32 h_parents[SYNX_GLOBAL_MAX_PARENTS] = {0};
        bool wait_cores[SYNX_CORE_MAX] = {false};

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_global_print_data(synx_g_obj, __func__);
        /* prepare for cross core signaling */
        data = synx_g_obj->handle;
        data <<= 32;
        if (synx_g_obj->num_child != 0) {
                /* composite handle */
                synx_g_obj->num_child--;
                if (synx_g_obj->status == SYNX_STATE_ACTIVE ||
                        (status > SYNX_STATE_SIGNALED_SUCCESS &&
                        status <= SYNX_STATE_SIGNALED_MAX))
                        synx_g_obj->status = status;
                if (synx_g_obj->num_child == 0) {
                        data |= synx_g_obj->status;
                        synx_global_get_waiting_cores_locked(synx_g_obj,
                                wait_cores);
                        synx_global_get_parents_locked(synx_g_obj, h_parents);
                        /* release ref held by constituting handles */
                        synx_g_obj->refcount--;
                        if (synx_g_obj->refcount == 0) {
                                memset(synx_g_obj, 0,
                                        sizeof(*synx_g_obj));
                                clear = true;
                        }
                } else {
                        /* pending notification from handles */
                        data = 0;
                        dprintk(SYNX_DBG,
                                "Child notified parent handle %u, pending %u\n",
                                synx_g_obj->handle, synx_g_obj->num_child);
                }
        } else {
                synx_g_obj->status = status;
                data |= synx_g_obj->status;
                synx_global_get_waiting_cores_locked(synx_g_obj,
                        wait_cores);
                synx_global_get_parents_locked(synx_g_obj, h_parents);
        }
        synx_gmem_unlock(idx, &flags);

        if (clear) {
                ipclite_global_test_and_clear_bit(idx%32,
                        (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
                dprintk(SYNX_MEM,
                        "cleared global idx %u\n", idx);
        }

        /* notify waiting clients on signal */
        if (data) {
                /*
                 * In case of SSR, someone might be waiting on the same
                 * core. In all other cases the synx_signal API takes care
                 * of signaling handles on the same core, so no interrupt
                 * needs to be sent to it.
                 */
                if (status == SYNX_STATE_SIGNALED_SSR)
                        i = 0;
                else
                        i = 1;
                for (; i < SYNX_CORE_MAX; i++) {
                        if (!wait_cores[i])
                                continue;
                        dprintk(SYNX_DBG,
                                "invoking ipc signal handle %u, status %u\n",
                                synx_g_obj->handle, synx_g_obj->status);
                        if (ipclite_msg_send(
                                synx_global_map_core_id(i),
                                data))
                                dprintk(SYNX_ERR,
                                        "ipc signaling %llu to core %u failed\n",
                                        data, i);
                }
        }

        /* handle parent notifications */
        for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
                p_idx = h_parents[i];
                if (p_idx == 0)
                        continue;
                synx_global_update_status_core(p_idx, status);
        }

        return SYNX_SUCCESS;
}
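
/*
 * Public signaling entry point. Verifies that the handle is a plain
 * (non-composite) entry that is still active before handing off to
 * synx_global_update_status_core().
 */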
int synx_global_update_status(u32 idx, u32 status)
{
        int rc = -SYNX_INVALID;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (!synx_is_valid_idx(idx) || status <= SYNX_STATE_ACTIVE)
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        if (synx_g_obj->num_child != 0) {
                /* composite handle cannot be signaled */
                goto fail;
        } else if (synx_g_obj->status != SYNX_STATE_ACTIVE) {
                rc = -SYNX_ALREADY;
                goto fail;
        }
        synx_gmem_unlock(idx, &flags);

        return synx_global_update_status_core(idx, status);

fail:
        synx_gmem_unlock(idx, &flags);
        return rc;
}
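
/*
 * Reference counting on a global entry. The final put clears the entry
 * and releases its bitmap slot so the index can be reallocated.
 */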
int synx_global_get_ref(u32 idx)
{
        int rc;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (!synx_is_valid_idx(idx))
                return -SYNX_INVALID;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return rc;
        synx_g_obj = &synx_gmem.table[idx];
        synx_global_print_data(synx_g_obj, __func__);
        if (synx_g_obj->handle && synx_g_obj->refcount)
                synx_g_obj->refcount++;
        else
                rc = -SYNX_NOENT;
        synx_gmem_unlock(idx, &flags);

        return rc;
}

void synx_global_put_ref(u32 idx)
{
        int rc;
        bool clear = false;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;

        if (!synx_gmem.table)
                return;

        if (!synx_is_valid_idx(idx))
                return;

        rc = synx_gmem_lock(idx, &flags);
        if (rc)
                return;
        synx_g_obj = &synx_gmem.table[idx];
        synx_g_obj->refcount--;
        if (synx_g_obj->refcount == 0) {
                memset(synx_g_obj, 0, sizeof(*synx_g_obj));
                clear = true;
        }
        synx_gmem_unlock(idx, &flags);

        if (clear) {
                ipclite_global_test_and_clear_bit(idx%32,
                        (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
                dprintk(SYNX_MEM, "cleared global idx %u\n", idx);
        }
}
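
/*
 * Link the handles in idx_list as children of the composite handle at
 * p_idx: each child gains a parent back-reference, the parent's
 * num_child counts children that still have to signal, and the parent
 * status is seeded from already-signaled children (an error status takes
 * precedence over success). On failure, parent back-references added so
 * far are removed.
 */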
int synx_global_merge(u32 *idx_list, u32 num_list, u32 p_idx)
{
        int rc = -SYNX_INVALID;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;
        u32 i, j = 0;
        u32 idx;
        u32 num_child = 0;
        u32 parent_status = SYNX_STATE_ACTIVE;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        if (!synx_is_valid_idx(p_idx))
                return -SYNX_INVALID;

        if (num_list == 0)
                return SYNX_SUCCESS;

        while (j < num_list) {
                idx = idx_list[j];

                if (!synx_is_valid_idx(idx))
                        goto fail;

                rc = synx_gmem_lock(idx, &flags);
                if (rc)
                        goto fail;

                synx_g_obj = &synx_gmem.table[idx];
                for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
                        if (synx_g_obj->parents[i] == 0) {
                                synx_g_obj->parents[i] = p_idx;
                                break;
                        }
                }
                if (synx_g_obj->status == SYNX_STATE_ACTIVE)
                        num_child++;
                else if (synx_g_obj->status >
                        SYNX_STATE_SIGNALED_SUCCESS &&
                        synx_g_obj->status <= SYNX_STATE_SIGNALED_MAX)
                        parent_status = synx_g_obj->status;
                else if (parent_status == SYNX_STATE_ACTIVE)
                        parent_status = synx_g_obj->status;

                if (synx_g_obj->status != SYNX_STATE_ACTIVE && synx_g_obj->num_child != 0)
                        num_child++;

                dprintk(SYNX_MEM, "synx_obj->status %d parent status %d\n",
                        synx_g_obj->status, parent_status);
                synx_gmem_unlock(idx, &flags);

                if (i >= SYNX_GLOBAL_MAX_PARENTS) {
                        rc = -SYNX_NOMEM;
                        goto fail;
                }

                j++;
        }

        rc = synx_gmem_lock(p_idx, &flags);
        if (rc)
                goto fail;
        synx_g_obj = &synx_gmem.table[p_idx];
        synx_g_obj->num_child += num_child;
        if (synx_g_obj->num_child != 0)
                synx_g_obj->refcount++;
        synx_g_obj->status = parent_status;
        synx_global_print_data(synx_g_obj, __func__);
        synx_gmem_unlock(p_idx, &flags);

        return SYNX_SUCCESS;

fail:
        while (num_child--) {
                idx = idx_list[num_child];

                if (synx_gmem_lock(idx, &flags))
                        continue;
                synx_g_obj = &synx_gmem.table[idx];
                for (i = 0; i < SYNX_GLOBAL_MAX_PARENTS; i++) {
                        if (synx_g_obj->parents[i] == p_idx) {
                                synx_g_obj->parents[i] = 0;
                                break;
                        }
                }
                synx_gmem_unlock(idx, &flags);
        }
        return rc;
}
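
/*
 * Clean up after a subsystem restart (SSR) on core_id: break the gmem
 * hwspinlock if the crashed core held it, then walk the allocated
 * entries, dropping the crashed core's subscription and reference.
 * Entries the core had referenced that are still active are signaled
 * with SYNX_STATE_SIGNALED_SSR; entries whose refcount drops to zero are
 * cleared and their bitmap slots released.
 */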
int synx_global_recover(enum synx_core_id core_id)
{
        int rc = SYNX_SUCCESS;
        u32 idx = 0;
        const u32 size = SYNX_GLOBAL_MAX_OBJS;
        unsigned long flags;
        struct synx_global_coredata *synx_g_obj;
        bool update;
        int *clear_idx = NULL;

        if (!synx_gmem.table)
                return -SYNX_NOMEM;

        clear_idx = kzalloc(sizeof(int)*SYNX_GLOBAL_MAX_OBJS, GFP_KERNEL);
        if (!clear_idx)
                return -SYNX_NOMEM;

        ipclite_recover(synx_global_map_core_id(core_id));

        /* recover synx gmem lock if it was owned by core in ssr */
        if (synx_gmem_lock_owner(0) == core_id) {
                synx_gmem_lock_owner_clear(0);
                hwspin_unlock_raw(synx_hwlock);
        }

        idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
                size, idx + 1);
        while (idx < size) {
                update = false;
                rc = synx_gmem_lock(idx, &flags);
                if (rc)
                        goto free;
                synx_g_obj = &synx_gmem.table[idx];
                if (synx_g_obj->refcount &&
                        synx_g_obj->subscribers & (1UL << core_id)) {
                        synx_g_obj->subscribers &= ~(1UL << core_id);
                        synx_g_obj->refcount--;
                        if (synx_g_obj->refcount == 0) {
                                memset(synx_g_obj, 0, sizeof(*synx_g_obj));
                                clear_idx[idx] = 1;
                        } else if (synx_g_obj->status == SYNX_STATE_ACTIVE) {
                                update = true;
                        }
                }
                synx_gmem_unlock(idx, &flags);
                if (update)
                        synx_global_update_status(idx,
                                SYNX_STATE_SIGNALED_SSR);
                idx = find_next_bit((unsigned long *)synx_gmem.bitmap,
                        size, idx + 1);
        }

        for (idx = 1; idx < size; idx++) {
                if (clear_idx[idx]) {
                        ipclite_global_test_and_clear_bit(idx % 32,
                                (ipclite_atomic_uint32_t *)(synx_gmem.bitmap + idx/32));
                        dprintk(SYNX_MEM, "released global idx %u\n", idx);
                }
        }

free:
        kfree(clear_idx);
        return rc;
}
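
/*
 * Carve the ipclite global partition into the synx layout: the
 * allocation bitmap (SYNX_GLOBAL_MAX_OBJS/32 u32 words) comes first,
 * followed by two u32 lock words, then the coredata table.
 */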
int synx_global_mem_init(void)
{
        int rc;
        int bitmap_size = SYNX_GLOBAL_MAX_OBJS/32;
        struct global_region_info mem_info;

        rc = get_global_partition_info(&mem_info);
        if (rc) {
                dprintk(SYNX_ERR, "error setting up global shared memory\n");
                return rc;
        }

        memset(mem_info.virt_base, 0, mem_info.size);
        dprintk(SYNX_DBG, "global shared memory %pK size %u\n",
                mem_info.virt_base, mem_info.size);

        synx_gmem.bitmap = (u32 *)mem_info.virt_base;
        synx_gmem.locks = synx_gmem.bitmap + bitmap_size;
        synx_gmem.table =
                (struct synx_global_coredata *)(synx_gmem.locks + 2);
        dprintk(SYNX_DBG, "global memory bitmap %pK, table %pK\n",
                synx_gmem.bitmap, synx_gmem.table);

        return synx_gmem_init();
}