  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
  6. #include <linux/module.h>
  7. #include <linux/irq.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/irqdomain.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/of_irq.h>
  14. #include <asm/memory.h>
  15. #include <linux/sizes.h>
  16. #include <linux/hwspinlock.h>
  17. #include <linux/qcom_scm.h>
  18. #include <linux/sysfs.h>
  19. #include "ipclite_client.h"
  20. #include "ipclite.h"
  21. #define GLOBAL_ATOMICS_ENABLED 1
  22. #define GLOBAL_ATOMICS_DISABLED 0
  23. #define FIFO_FULL_RESERVE 8
  24. #define FIFO_ALIGNMENT 8
  25. static struct ipclite_info *ipclite;
  26. static struct ipclite_client synx_client;
  27. static struct ipclite_client test_client;
  28. static struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
  29. static struct ipclite_debug_info *ipclite_dbg_info;
  30. static struct ipclite_debug_struct *ipclite_dbg_struct;
  31. static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
  32. static struct mutex ssr_mutex;
  33. static struct kobject *sysfs_kobj;
  34. static uint32_t enabled_hosts;
  35. static uint32_t partitions;
  36. static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
  37. static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
  38. static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
  39. static inline bool is_host_enabled(uint32_t host)
  40. {
  41. return (1U & (enabled_hosts >> host));
  42. }
  43. static inline bool is_loopback_except_apps(uint32_t h0, uint32_t h1)
  44. {
  45. return (h0 == h1 && h0 != IPCMEM_APPS);
  46. }
  47. static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...)
  48. {
  49. uint32_t local_index = 0;
  50. va_list pArgs;
  51. va_start(pArgs, psztStr);
  52. /* Incrementing the index atomically and storing the index in local variable */
  53. local_index = ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
  54. &ipclite_dbg_info->debug_log_index);
  55. local_index %= IPCLITE_LOG_BUF_SIZE;
  56. /* Writes data on the index location */
  57. vsnprintf(ipclite_dbg_inmem->IPCLITELog[local_index], IPCLITE_LOG_MSG_SIZE, psztStr, pArgs);
  58. va_end(pArgs);
  59. }
/*
 * Dump the shared IPCLite debug structure to dmesg: per-host totals of
 * signals sent/received, last peer/signal IDs, and a two-deep history of
 * TX/RX FIFO indices for every enabled host pair. No-op unless
 * IPCLITE_DBG_STRUCT is set in ipclite_debug_control.
 */
static void ipclite_dump_debug_struct(void)
{
	int i, host;
	struct ipclite_debug_struct *temp_dbg_struct;

	/* Check if debug structures are initialized */
	if (!ipclite_dbg_info || !ipclite_dbg_struct) {
		pr_err("Debug Structures not initialized\n");
		return;
	}
	/* Check if debug structures are enabled before printing */
	if (!(ipclite_debug_control & IPCLITE_DBG_STRUCT)) {
		pr_err("Debug Structures not enabled\n");
		return;
	}
	/* Dumping the debug structures */
	pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n");
	for (host = 0; host < IPCMEM_NUM_HOSTS; host++) {
		if (!is_host_enabled(host))
			continue;
		/* Per-host entries are laid out contiguously in shared memory;
		 * index by host ID.
		 */
		temp_dbg_struct = (struct ipclite_debug_struct *)
					(((char *)ipclite_dbg_struct) +
					(sizeof(*temp_dbg_struct) * host));
		pr_info("---------- Host ID: %d dbg_mem:%p ----------\n",
					host, temp_dbg_struct);
		pr_info("Total Signals Sent : %d Total Signals Received : %d\n",
					temp_dbg_struct->dbg_info_overall.total_numsig_sent,
					temp_dbg_struct->dbg_info_overall.total_numsig_recv);
		pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n",
					temp_dbg_struct->dbg_info_overall.last_sent_host_id,
					temp_dbg_struct->dbg_info_overall.last_recv_host_id);
		pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n",
					temp_dbg_struct->dbg_info_overall.last_sigid_sent,
					temp_dbg_struct->dbg_info_overall.last_sigid_recv);
		/* Inner loop: this host's view of every other enabled host */
		for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
			if (!is_host_enabled(i))
				continue;
			pr_info("----------> Host ID : %d Host ID : %d\n", host, i);
			pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
					temp_dbg_struct->dbg_info_host[i].numsig_sent,
					temp_dbg_struct->dbg_info_host[i].numsig_recv);
			pr_info("No. of Interrupts Received : %d\n",
					temp_dbg_struct->dbg_info_host[i].num_intr);
			pr_info("TX Write Index : %d TX Read Index : %d\n",
					temp_dbg_struct->dbg_info_host[i].tx_wr_index,
					temp_dbg_struct->dbg_info_host[i].tx_rd_index);
			/* prev_*[0] is the most recent prior value, [1] the one before */
			pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n",
					temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0],
					temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]);
			pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n",
					temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1],
					temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]);
			pr_info("RX Write Index : %d RX Read Index : %d\n",
					temp_dbg_struct->dbg_info_host[i].rx_wr_index,
					temp_dbg_struct->dbg_info_host[i].rx_rd_index);
			pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n",
					temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0],
					temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]);
			pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n",
					temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1],
					temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
		}
	}
	return;
}
  124. static void ipclite_dump_inmem_logs(void)
  125. {
  126. int i;
  127. uint32_t local_index = 0;
  128. /* Check if debug and inmem structures are initialized */
  129. if (!ipclite_dbg_info || !ipclite_dbg_inmem) {
  130. pr_err("Debug structures not initialized\n");
  131. return;
  132. }
  133. /* Check if debug structures are enabled before printing */
  134. if (!(ipclite_debug_control & IPCLITE_INMEM_LOG)) {
  135. pr_err("In-Memory Logs not enabled\n");
  136. return;
  137. }
  138. /* Dumping the debug in-memory logs */
  139. pr_info("------------------- Dumping In-Memory Logs -------------------\n");
  140. /* Storing the index atomically in local variable */
  141. local_index = ipclite_global_atomic_load_u32((ipclite_atomic_uint32_t *)
  142. &ipclite_dbg_info->debug_log_index);
  143. /* Printing from current index till the end of buffer */
  144. for (i = local_index % IPCLITE_LOG_BUF_SIZE; i < IPCLITE_LOG_BUF_SIZE; i++) {
  145. if (ipclite_dbg_inmem->IPCLITELog[i][0])
  146. pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
  147. }
  148. /* Printing from 0th index to current-1 index */
  149. for (i = 0; i < local_index % IPCLITE_LOG_BUF_SIZE; i++) {
  150. if (ipclite_dbg_inmem->IPCLITELog[i][0])
  151. pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
  152. }
  153. return;
  154. }
  155. static void ipclite_hw_mutex_acquire(void)
  156. {
  157. int32_t ret;
  158. if (ipclite != NULL) {
  159. if (!global_atomic_support) {
  160. ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
  161. HWSPINLOCK_TIMEOUT,
  162. &ipclite->ipclite_hw_mutex->flags);
  163. if (ret) {
  164. IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed\n");
  165. return;
  166. }
  167. ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_APPS;
  168. IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n");
  169. }
  170. }
  171. }
  172. static void ipclite_hw_mutex_release(void)
  173. {
  174. if (ipclite != NULL) {
  175. if (!global_atomic_support) {
  176. ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
  177. hwspin_unlock_irqrestore(ipclite->hwlock,
  178. &ipclite->ipclite_hw_mutex->flags);
  179. IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n");
  180. }
  181. }
  182. }
/* Plain (unlocked) store to initialize a shared atomic u32 before it is
 * visible to any remote host.
 */
void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
{
	atomic_set(addr, data);
}
EXPORT_SYMBOL(ipclite_atomic_init_u32);
/* Plain (unlocked) store to initialize a shared atomic i32 before it is
 * visible to any remote host.
 */
void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data)
{
	atomic_set(addr, data);
}
EXPORT_SYMBOL(ipclite_atomic_init_i32);
/* Store @data to a shared atomic u32, serialized against remote hosts via
 * the hw mutex when global atomic support is absent.
 */
void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
{
	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	atomic_set(addr, data);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
}
EXPORT_SYMBOL(ipclite_global_atomic_store_u32);
/* Store @data to a shared atomic i32, serialized against remote hosts via
 * the hw mutex when global atomic support is absent.
 */
void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data)
{
	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	atomic_set(addr, data);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
}
EXPORT_SYMBOL(ipclite_global_atomic_store_i32);
/* Read a shared atomic u32, serialized against remote hosts via the hw
 * mutex when global atomic support is absent. Returns the current value.
 */
uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr)
{
	uint32_t ret;

	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	ret = atomic_read(addr);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
	return ret;
}
EXPORT_SYMBOL(ipclite_global_atomic_load_u32);
/* Read a shared atomic i32, serialized against remote hosts via the hw
 * mutex when global atomic support is absent. Returns the current value.
 */
int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr)
{
	int32_t ret;

	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	ret = atomic_read(addr);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
	return ret;
}
EXPORT_SYMBOL(ipclite_global_atomic_load_i32);
  233. uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
  234. {
  235. uint32_t ret;
  236. uint32_t mask = (1 << nr);
  237. /* callback to acquire hw mutex lock if atomic support is not enabled */
  238. ipclite->ipclite_hw_mutex->acquire();
  239. ret = atomic_fetch_or(mask, addr);
  240. /* callback to release hw mutex lock if atomic support is not enabled */
  241. ipclite->ipclite_hw_mutex->release();
  242. return ret;
  243. }
  244. EXPORT_SYMBOL(ipclite_global_test_and_set_bit);
  245. uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
  246. {
  247. uint32_t ret;
  248. uint32_t mask = (1 << nr);
  249. /* callback to acquire hw mutex lock if atomic support is not enabled */
  250. ipclite->ipclite_hw_mutex->acquire();
  251. ret = atomic_fetch_and(~mask, addr);
  252. /* callback to release hw mutex lock if atomic support is not enabled */
  253. ipclite->ipclite_hw_mutex->release();
  254. return ret;
  255. }
  256. EXPORT_SYMBOL(ipclite_global_test_and_clear_bit);
  257. int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr)
  258. {
  259. int32_t ret = 0;
  260. /* callback to acquire hw mutex lock if atomic support is not enabled */
  261. ipclite->ipclite_hw_mutex->acquire();
  262. ret = atomic_fetch_add(1, addr);
  263. /* callback to release hw mutex lock if atomic support is not enabled */
  264. ipclite->ipclite_hw_mutex->release();
  265. return ret;
  266. }
  267. EXPORT_SYMBOL(ipclite_global_atomic_inc);
  268. int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr)
  269. {
  270. int32_t ret = 0;
  271. /* callback to acquire hw mutex lock if atomic support is not enabled */
  272. ipclite->ipclite_hw_mutex->acquire();
  273. ret = atomic_fetch_sub(1, addr);
  274. /* callback to release hw mutex lock if atomic support is not enabled */
  275. ipclite->ipclite_hw_mutex->release();
  276. return ret;
  277. }
  278. EXPORT_SYMBOL(ipclite_global_atomic_dec);
  279. static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo)
  280. {
  281. size_t len;
  282. u32 head;
  283. u32 tail;
  284. head = le32_to_cpu(*rx_fifo->head);
  285. tail = le32_to_cpu(*rx_fifo->tail);
  286. IPCLITE_OS_LOG(IPCLITE_DBG, "head=%d, tail=%d\n", head, tail);
  287. if (head < tail)
  288. len = rx_fifo->length - tail + head;
  289. else
  290. len = head - tail;
  291. if (WARN_ON_ONCE(len > rx_fifo->length))
  292. len = 0;
  293. IPCLITE_OS_LOG(IPCLITE_DBG, "len=%d\n", len);
  294. return len;
  295. }
/*
 * Copy @count bytes starting at the RX tail into @data without consuming
 * them (the tail index is not advanced). Wraparound is handled with a
 * second copy from the start of the ring.
 * NOTE(review): tail == length is treated as wrap-to-0 rather than
 * corruption — only tail > length trips the WARN; confirm intended.
 */
static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo,
		void *data, size_t count)
{
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*rx_fifo->tail);
	if (WARN_ON_ONCE(tail > rx_fifo->length))
		return;
	if (tail >= rx_fifo->length)
		tail -= rx_fifo->length;
	/* First chunk: from tail up to the end of the ring */
	len = min_t(size_t, count, rx_fifo->length - tail);
	if (len)
		memcpy_fromio(data, rx_fifo->fifo + tail, len);
	/* Wrapped remainder, copied from the start of the ring */
	if (len != count)
		memcpy_fromio(data + len, rx_fifo->fifo, (count - len));
}
/*
 * Consume @count bytes from the RX ring by advancing the shared tail
 * index (with wraparound), then record the new indices in the debug
 * structure when IPCLITE_DBG_STRUCT is enabled. The prev_*[ ] arrays keep
 * a two-deep history: [1] receives the old [0], [0] the old current.
 */
static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo,
		size_t count, uint32_t core_id)
{
	u32 tail;

	tail = le32_to_cpu(*rx_fifo->tail);
	tail += count;
	if (tail >= rx_fifo->length)
		tail %= rx_fifo->length;
	*rx_fifo->tail = cpu_to_le32(tail);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index;
		ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index = *rx_fifo->head;
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index;
		ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index = *rx_fifo->tail;
		ipclite_dbg_struct->dbg_info_overall.total_numsig_recv++;
		ipclite_dbg_struct->dbg_info_host[core_id].numsig_recv++;
	}
}
  337. static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo)
  338. {
  339. u32 head;
  340. u32 tail;
  341. u32 avail;
  342. head = le32_to_cpu(*tx_fifo->head);
  343. tail = le32_to_cpu(*tx_fifo->tail);
  344. if (tail <= head)
  345. avail = tx_fifo->length - head + tail;
  346. else
  347. avail = tail - head;
  348. if (avail < FIFO_FULL_RESERVE)
  349. avail = 0;
  350. else
  351. avail -= FIFO_FULL_RESERVE;
  352. if (WARN_ON_ONCE(avail > tx_fifo->length))
  353. avail = 0;
  354. return avail;
  355. }
  356. static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo,
  357. unsigned int head,
  358. const void *data, size_t count)
  359. {
  360. size_t len;
  361. if (WARN_ON_ONCE(head > tx_fifo->length))
  362. return head;
  363. len = min_t(size_t, count, tx_fifo->length - head);
  364. if (len)
  365. memcpy(tx_fifo->fifo + head, data, len);
  366. if (len != count)
  367. memcpy(tx_fifo->fifo, data + len, count - len);
  368. head += count;
  369. if (head >= tx_fifo->length)
  370. head -= tx_fifo->length;
  371. return head;
  372. }
/*
 * Write @dlen bytes into the TX ring, align the new head to 8 bytes,
 * publish it after a write barrier (so remote readers never see the head
 * move before the payload lands), and record TX bookkeeping in the debug
 * structure when IPCLITE_DBG_STRUCT is enabled.
 */
static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo,
		const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id)
{
	unsigned int head;

	head = le32_to_cpu(*tx_fifo->head);
	head = ipcmem_tx_write_one(tx_fifo, head, data, dlen);
	/* Messages stay 8-byte aligned in the ring */
	head = ALIGN(head, 8);
	if (head >= tx_fifo->length)
		head -= tx_fifo->length;
	/* Ensure ordering of fifo and head update */
	wmb();
	*tx_fifo->head = cpu_to_le32(head);
	IPCLITE_OS_LOG(IPCLITE_DBG, "head : %d core_id : %d signal_id : %d\n",
				*tx_fifo->head, core_id, signal_id);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		/* Two-deep index history: [1] <- [0] <- current */
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index;
		ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index = *tx_fifo->head;
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index;
		ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index = *tx_fifo->tail;
		ipclite_dbg_struct->dbg_info_overall.total_numsig_sent++;
		ipclite_dbg_struct->dbg_info_host[core_id].numsig_sent++;
		ipclite_dbg_struct->dbg_info_overall.last_sent_host_id = core_id;
		ipclite_dbg_struct->dbg_info_overall.last_sigid_sent = signal_id;
	}
}
/* Bytes currently readable from @channel's RX FIFO (via its avail op). */
static size_t ipclite_rx_avail(struct ipclite_channel *channel)
{
	return channel->rx_fifo->avail(channel->rx_fifo);
}
/* Non-destructive read of @count bytes from @channel's RX FIFO into @data. */
static void ipclite_rx_peak(struct ipclite_channel *channel,
		void *data, size_t count)
{
	channel->rx_fifo->peak(channel->rx_fifo, data, count);
}
/* Consume @count bytes from @channel's RX FIFO; remote_pid tags the
 * debug bookkeeping inside the advance op.
 */
static void ipclite_rx_advance(struct ipclite_channel *channel,
		size_t count)
{
	channel->rx_fifo->advance(channel->rx_fifo, count, channel->remote_pid);
}
/* Free space in @channel's TX FIFO (via its avail op). */
static size_t ipclite_tx_avail(struct ipclite_channel *channel)
{
	return channel->tx_fifo->avail(channel->tx_fifo);
}
/* Write @dlen bytes into @channel's TX FIFO; remote_pid and the channel's
 * signal_id tag the debug bookkeeping inside the write op.
 */
static void ipclite_tx_write(struct ipclite_channel *channel,
		const void *data, size_t dlen)
{
	channel->tx_fifo->write(channel->tx_fifo, data, dlen, channel->remote_pid,
							channel->irq_info->signal_id);
}
  429. static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail)
  430. {
  431. uint64_t data;
  432. int ret = 0;
  433. if (avail < sizeof(data)) {
  434. IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
  435. channel->remote_pid, channel->irq_info->signal_id);
  436. return -EAGAIN;
  437. }
  438. ipclite_rx_peak(channel, &data, sizeof(data));
  439. if (synx_client.reg_complete == 1) {
  440. if (synx_client.callback)
  441. synx_client.callback(channel->remote_pid, data,
  442. synx_client.priv_data);
  443. }
  444. ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
  445. return ret;
  446. }
  447. static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail)
  448. {
  449. uint64_t data;
  450. int ret = 0;
  451. if (avail < sizeof(data)) {
  452. IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
  453. channel->remote_pid, channel->irq_info->signal_id);
  454. return -EAGAIN;
  455. }
  456. ipclite_rx_peak(channel, &data, sizeof(data));
  457. if (test_client.reg_complete == 1) {
  458. if (test_client.callback)
  459. test_client.callback(channel->remote_pid, data,
  460. test_client.priv_data);
  461. }
  462. ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
  463. return ret;
  464. }
/*
 * Interrupt handler for inter-core signals. Recovers the owning channel
 * from the per-signal irq_info element, records debug bookkeeping, then
 * for MSG/TEST signals drains the RX FIFO one 8-byte message at a time.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t ipclite_intr(int irq, void *data)
{
	struct ipclite_channel *channel;
	struct ipclite_irq_info *irq_info;
	unsigned int avail = 0;
	int ret = 0;
	uint64_t msg;

	irq_info = (struct ipclite_irq_info *)data;
	/* data points at channel->irq_info[signal_id]; walk back to the channel */
	channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]);
	IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt received from Core : %d Signal : %d\n",
			channel->remote_pid, irq_info->signal_id);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		ipclite_dbg_struct->dbg_info_host[channel->remote_pid].num_intr++;
		ipclite_dbg_struct->dbg_info_overall.last_recv_host_id = channel->remote_pid;
		ipclite_dbg_struct->dbg_info_overall.last_sigid_recv = irq_info->signal_id;
	}
	if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) {
		/* Drain complete 8-byte messages; partial data waits for the next IRQ */
		for (;;) {
			avail = ipclite_rx_avail(channel);
			if (avail < sizeof(msg))
				break;
			ret = ipclite_rx_data(channel, avail);
		}
		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
	} else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) {
		IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is currently not enabled\n");
	} else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) {
		for (;;) {
			avail = ipclite_rx_avail(channel);
			if (avail < sizeof(msg))
				break;
			ret = ipclite_rx_test_data(channel, avail);
		}
		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
	} else {
		IPCLITE_OS_LOG(IPCLITE_ERR, "Wrong Interrupt Signal from core : %d signal : %d\n",
				channel->remote_pid, irq_info->signal_id);
	}
	return IRQ_HANDLED;
}
  506. static int ipclite_tx(struct ipclite_channel *channel,
  507. uint64_t data, size_t dlen, uint32_t ipclite_signal)
  508. {
  509. unsigned long flags;
  510. int ret = 0;
  511. if (channel->status != ACTIVE) {
  512. if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
  513. channel->status = ACTIVE;
  514. } else {
  515. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Channel not active\n");
  516. return -EOPNOTSUPP;
  517. }
  518. }
  519. spin_lock_irqsave(&channel->tx_lock, flags);
  520. if (ipclite_tx_avail(channel) < dlen) {
  521. spin_unlock_irqrestore(&channel->tx_lock, flags);
  522. ret = -EAGAIN;
  523. return ret;
  524. }
  525. ipclite_tx_write(channel, &data, dlen);
  526. mbox_send_message(channel->irq_info[ipclite_signal].mbox_chan, NULL);
  527. mbox_client_txdone(channel->irq_info[ipclite_signal].mbox_chan, 0);
  528. spin_unlock_irqrestore(&channel->tx_lock, flags);
  529. return ret;
  530. }
  531. static int ipclite_send_debug_info(int32_t proc_id)
  532. {
  533. int ret = 0;
  534. struct ipclite_channel *channel;
  535. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  536. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  537. return -EINVAL;
  538. }
  539. channel = &ipclite->channel[proc_id];
  540. if (channel->status != ACTIVE) {
  541. if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
  542. channel->status = ACTIVE;
  543. } else {
  544. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
  545. return -EOPNOTSUPP;
  546. }
  547. }
  548. ret = mbox_send_message(channel->irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan, NULL);
  549. if (ret < 0) {
  550. IPCLITE_OS_LOG(IPCLITE_ERR,
  551. "Debug Signal sending failed to Core : %d Signal : %d ret : %d\n",
  552. proc_id, IPCLITE_DEBUG_SIGNAL, ret);
  553. return ret;
  554. }
  555. IPCLITE_OS_LOG(IPCLITE_DBG,
  556. "Debug Signal send completed to core : %d signal : %d ret : %d\n",
  557. proc_id, IPCLITE_DEBUG_SIGNAL, ret);
  558. return 0;
  559. }
  560. int ipclite_ssr_update(int32_t proc_id)
  561. {
  562. int ret = 0;
  563. struct ipclite_channel *channel;
  564. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  565. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  566. return -EINVAL;
  567. }
  568. channel = &ipclite->channel[proc_id];
  569. if (channel->status != ACTIVE) {
  570. if (channel->status == IN_PROGRESS && *channel->gstatus_ptr == ACTIVE) {
  571. channel->status = ACTIVE;
  572. } else {
  573. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
  574. return -EOPNOTSUPP;
  575. }
  576. }
  577. ret = mbox_send_message(channel->irq_info[IPCLITE_SSR_SIGNAL].mbox_chan, NULL);
  578. if (ret < 0) {
  579. IPCLITE_OS_LOG(IPCLITE_ERR,
  580. "SSR Signal sending failed to Core : %d Signal : %d ret : %d\n",
  581. proc_id, IPCLITE_SSR_SIGNAL, ret);
  582. return ret;
  583. }
  584. IPCLITE_OS_LOG(IPCLITE_DBG,
  585. "SSR Signal send completed to core : %d signal : %d ret : %d\n",
  586. proc_id, IPCLITE_SSR_SIGNAL, ret);
  587. return 0;
  588. }
/*
 * SSR recovery hook, called when core @core_id crashes. Force-releases
 * the hw mutex if the crashed core owned it, zeroes the global channel
 * status of every partition shared with that core, resets its TX/RX FIFO
 * indices, bumps the global channel status back up, and notifies all
 * other enabled cores via the SSR signal.
 */
void ipclite_recover(enum ipcmem_host_type core_id)
{
	int ret, host, host0, host1;
	uint32_t p;

	IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id);
	/* verify and reset the hw mutex lock */
	if (core_id == ipclite->ipcmem.toc_data.host_info->hwlock_owner) {
		ipclite->ipcmem.toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
		hwspin_unlock_raw(ipclite->hwlock);
		IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n");
	}
	mutex_lock(&ssr_mutex);
	/* Set the Global Channel Status to 0 to avoid Race condition */
	for (p = 0; p < partitions; p++) {
		host0 = ipclite->ipcmem.toc_data.partition_entry[p].host0;
		host1 = ipclite->ipcmem.toc_data.partition_entry[p].host1;
		if (host0 != core_id && host1 != core_id)
			continue;
		ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
				(&(ipclite->ipcmem.partition[p]->hdr.status)), 0);
		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n",
				host0, host1, ipclite->ipcmem.partition[p]->hdr.status);
	}
	/* Resets the TX/RX queue */
	*(ipclite->channel[core_id].tx_fifo->head) = 0;
	*(ipclite->channel[core_id].rx_fifo->tail) = 0;
	IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n",
			*(ipclite->channel[core_id].tx_fifo->head));
	IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n",
			*(ipclite->channel[core_id].rx_fifo->tail));
	/* Increment the Global Channel Status for APPS and crashed core*/
	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
			ipclite->channel[core_id].gstatus_ptr);
	ipclite->channel[core_id].status = *ipclite->channel[core_id].gstatus_ptr;
	/* Update other cores about SSR; host 0 (ourselves) is skipped */
	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
		if (!is_host_enabled(host) || host == core_id)
			continue;
		ret = ipclite_ssr_update(host);
		if (ret < 0)
			IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send SSR update to core %d\n", host);
		else
			IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
	}
	mutex_unlock(&ssr_mutex);
	/* Dump the debug information */
	if (ipclite_debug_dump & IPCLITE_DUMP_SSR) {
		ipclite_dump_debug_struct();
		ipclite_dump_inmem_logs();
	}
	return;
}
EXPORT_SYMBOL(ipclite_recover);
  642. int ipclite_msg_send(int32_t proc_id, uint64_t data)
  643. {
  644. int ret = 0;
  645. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  646. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  647. return -EINVAL;
  648. }
  649. ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
  650. IPCLITE_MSG_SIGNAL);
  651. IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core : %d signal : %d ret : %d\n",
  652. proc_id, IPCLITE_MSG_SIGNAL, ret);
  653. return ret;
  654. }
  655. EXPORT_SYMBOL(ipclite_msg_send);
  656. int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv)
  657. {
  658. if (!cb_func_ptr) {
  659. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
  660. return -EINVAL;
  661. }
  662. synx_client.callback = cb_func_ptr;
  663. synx_client.priv_data = priv;
  664. synx_client.reg_complete = 1;
  665. IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n");
  666. return 0;
  667. }
  668. EXPORT_SYMBOL(ipclite_register_client);
  669. int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
  670. {
  671. int ret = 0;
  672. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  673. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  674. return -EINVAL;
  675. }
  676. ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
  677. IPCLITE_TEST_SIGNAL);
  678. IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n",
  679. proc_id, IPCLITE_TEST_SIGNAL, ret);
  680. return ret;
  681. }
  682. EXPORT_SYMBOL(ipclite_test_msg_send);
  683. int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv)
  684. {
  685. if (!cb_func_ptr) {
  686. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
  687. return -EINVAL;
  688. }
  689. test_client.callback = cb_func_ptr;
  690. test_client.priv_data = priv;
  691. test_client.reg_complete = 1;
  692. IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n");
  693. return 0;
  694. }
  695. EXPORT_SYMBOL(ipclite_register_test_client);
/*
 * map_ipcmem() - Resolve and map the shared IPC memory region.
 * @ipclite: driver context providing the device and ipcmem descriptor.
 * @name:    DT property holding the memory-region phandle.
 *
 * Fills ipclite->ipcmem.mem with the physical base, size, and a
 * write-combined virtual mapping (devm-managed, so auto-unmapped).
 *
 * Return: 0 on success, -EINVAL when the phandle is missing, -ENOMEM if
 * the ioremap fails, or the error from of_address_to_resource().
 */
static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
{
	struct device *dev;
	struct device_node *np;
	struct resource r;
	int ret = 0;

	dev = ipclite->dev;

	/* look up the reserved-memory node referenced by @name */
	np = of_parse_phandle(dev->of_node, name, 0);
	if (!np) {
		IPCLITE_OS_LOG(IPCLITE_ERR, "No %s specified\n", name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np); /* node reference no longer needed either way */
	if (ret)
		return ret;

	ipclite->ipcmem.mem.aux_base = (u64)r.start;
	ipclite->ipcmem.mem.size = resource_size(&r);
	ipclite->ipcmem.mem.virt_base = devm_ioremap_wc(dev, r.start,
					resource_size(&r));
	if (!ipclite->ipcmem.mem.virt_base)
		return -ENOMEM;

	/* NOTE(review): "%lx" for a u64 and "%d" for the size may mismatch
	 * field widths on 32-bit builds - confirm against the field types.
	 */
	IPCLITE_OS_LOG(IPCLITE_DBG, "aux_base = %lx, size=%d,virt_base=%p\n",
			ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size,
			ipclite->ipcmem.mem.virt_base);

	return ret;
}
  723. /**
  724. * insert_magic_number() - Inserts the magic number in toc header
  725. *
  726. * Function computes a simple checksum of the contents in toc header
  727. * and stores the result in magic_number field in the toc header
  728. */
  729. static void insert_magic_number(void)
  730. {
  731. uint32_t *block = ipclite->ipcmem.mem.virt_base;
  732. size_t size = sizeof(struct ipcmem_toc_header) / sizeof(uint32_t);
  733. for (int i = 1; i < size; i++)
  734. block[0] ^= block[i];
  735. block[0] = ~block[0];
  736. }
/*
 * setup_toc() - Lay out the Table Of Contents at the base of IPC memory.
 * @ipcmem: descriptor for the mapped IPC memory region.
 *
 * Computes the offset of each TOC sub-structure, records those offsets in
 * the in-memory TOC, and points toc_data members at the corresponding
 * addresses. Note the "offset +=" chaining: each structure is placed
 * immediately after the previous one, starting IPCMEM_TOC_VAR_OFFSET
 * bytes into the region.
 *
 * Return: always 0.
 */
static int32_t setup_toc(struct ipclite_mem *ipcmem)
{
	size_t offset = 0;
	void *virt_base = ipcmem->mem.virt_base;
	struct ipcmem_offsets *offsets = &ipcmem->toc->offsets;
	struct ipcmem_toc_data *toc_data = &ipcmem->toc_data;

	/* Setup Offsets (each field gets the running offset AFTER adding
	 * the size of the structure that precedes it)
	 */
	offsets->host_info = offset += IPCMEM_TOC_VAR_OFFSET;
	offsets->global_entry = offset += sizeof(struct ipcmem_host_info);
	offsets->partition_info = offset += sizeof(struct ipcmem_partition_entry);
	offsets->partition_entry = offset += sizeof(struct ipcmem_partition_info);
	// offsets->debug = virt_base + size - 64K;
	/* Offset to be used for any new structure added in toc (after partition_entry) */
	// offsets->new_struct = offset += sizeof(struct ipcmem_partition_entry)*IPCMEM_NUM_HOSTS;

	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_data offsets:");
	IPCLITE_OS_LOG(IPCLITE_DBG, "host_info = 0x%X", offsets->host_info);
	IPCLITE_OS_LOG(IPCLITE_DBG, "global_entry = 0x%X", offsets->global_entry);
	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_info = 0x%X", offsets->partition_info);
	IPCLITE_OS_LOG(IPCLITE_DBG, "partition_entry = 0x%X", offsets->partition_entry);

	/* Point structures to the appropriate offset in TOC */
	toc_data->host_info = ADD_OFFSET(virt_base, offsets->host_info);
	toc_data->global_entry = ADD_OFFSET(virt_base, offsets->global_entry);
	toc_data->partition_info = ADD_OFFSET(virt_base, offsets->partition_info);
	toc_data->partition_entry = ADD_OFFSET(virt_base, offsets->partition_entry);

	return 0;
}
/*
 * setup_global_partition() - Initialize the global partition (shared by
 * all hosts) at @base_offset within IPC memory.
 *
 * Writes the global partition's TOC entry (offset, size, flags, hosts)
 * and copies the canonical header template into the partition itself.
 */
static void setup_global_partition(struct ipclite_mem *ipcmem, uint32_t base_offset)
{
	/*Fill in global partition details*/
	ipcmem->toc_data.global_entry->base_offset = base_offset;
	ipcmem->toc_data.global_entry->size = GLOBAL_PARTITION_SIZE;
	ipcmem->toc_data.global_entry->flags = GLOBAL_PARTITION_FLAGS;
	/* both endpoints are the "global" pseudo-host */
	ipcmem->toc_data.global_entry->host0 = IPCMEM_GLOBAL_HOST;
	ipcmem->toc_data.global_entry->host1 = IPCMEM_GLOBAL_HOST;

	ipcmem->global_partition = ADD_OFFSET(ipcmem->mem.virt_base, base_offset);
	IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n",
				base_offset,
				ipcmem->global_partition);

	/* copy the pre-defined header template into shared memory */
	ipcmem->global_partition->hdr = global_partition_hdr;
	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
				ipcmem->global_partition->hdr.partition_type,
				ipcmem->global_partition->hdr.region_offset,
				ipcmem->global_partition->hdr.region_size);
}
/*
 * update_partition() - Resolve partition @p's address from its TOC entry
 * and stamp the appropriate header template into it.
 *
 * Loopback partitions (host0 == host1) get the loopback header; all
 * other host pairs get the default header.
 */
static void update_partition(struct ipclite_mem *ipcmem, uint32_t p)
{
	int host0 = ipcmem->toc_data.partition_entry[p].host0;
	int host1 = ipcmem->toc_data.partition_entry[p].host1;

	IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);

	/* cache the partition's virtual address for local use */
	ipcmem->partition[p] = ADD_OFFSET(ipcmem->mem.virt_base,
				ipcmem->toc_data.partition_entry[p].base_offset);
	IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx",
				p, ipcmem->partition[p],
				p, ipcmem->toc_data.partition_entry[p].base_offset);

	if (host0 == host1)
		ipcmem->partition[p]->hdr = loopback_partition_hdr;
	else
		ipcmem->partition[p]->hdr = default_partition_hdr;

	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d",
				ipcmem->partition[p]->hdr.type,
				ipcmem->partition[p]->hdr.desc_offset,
				ipcmem->partition[p]->hdr.desc_size);
}
/*
 * setup_partitions() - Create a TOC entry and partition for every enabled
 * host pair, packed back-to-back starting at @base_offset.
 *
 * Pairs are enumerated with host0 <= host1; loopback pairs are skipped
 * for all hosts except APPS (per is_loopback_except_apps). Also allocates
 * the local partition pointer array and publishes the entry count in both
 * the TOC and the 'partitions' global.
 *
 * Return: 0 on success, -ENOMEM if the pointer array allocation fails.
 */
static int32_t setup_partitions(struct ipclite_mem *ipcmem, uint32_t base_offset)
{
	uint32_t p, host0, host1;
	uint32_t num_entry = 0;

	/*Fill in each valid ipcmem partition table entry*/
	for (host0 = 0; host0 < IPCMEM_NUM_HOSTS; host0++) {
		if (!is_host_enabled(host0))
			continue;
		for (host1 = host0; host1 < IPCMEM_NUM_HOSTS; host1++) {
			if (!is_host_enabled(host1) || is_loopback_except_apps(host0, host1))
				continue;
			ipcmem->toc_data.partition_entry[num_entry].base_offset = base_offset;
			ipcmem->toc_data.partition_entry[num_entry].size = DEFAULT_PARTITION_SIZE;
			ipcmem->toc_data.partition_entry[num_entry].flags = DEFAULT_PARTITION_FLAGS;
			ipcmem->toc_data.partition_entry[num_entry].host0 = host0;
			ipcmem->toc_data.partition_entry[num_entry].host1 = host1;

			/* partitions are laid out contiguously */
			base_offset += DEFAULT_PARTITION_SIZE;
			num_entry++;
		}
	}
	IPCLITE_OS_LOG(IPCLITE_DBG, "total partitions = %u", num_entry);

	ipcmem->partition = kcalloc(num_entry, sizeof(*ipcmem->partition), GFP_KERNEL);
	if (!ipcmem->partition) {
		IPCLITE_OS_LOG(IPCLITE_ERR, "Partition Allocation failed");
		return -ENOMEM;
	}

	/*Update appropriate partition based on partition entries*/
	for (p = 0; p < num_entry; p++)
		update_partition(ipcmem, p);

	/*Set up info to parse partition entries*/
	ipcmem->toc_data.partition_info->num_entries = partitions = num_entry;
	ipcmem->toc_data.partition_info->entry_size = sizeof(struct ipcmem_partition_entry);

	return 0;
}
  834. static int32_t ipcmem_init(struct ipclite_mem *ipcmem, struct device_node *pn)
  835. {
  836. int ret;
  837. uint32_t remote_pid;
  838. uint32_t host_count = 0;
  839. uint32_t gmem_offset = 0;
  840. struct device_node *cn;
  841. for_each_available_child_of_node(pn, cn) {
  842. of_property_read_u32(cn, "qcom,remote-pid", &remote_pid);
  843. if (remote_pid < IPCMEM_NUM_HOSTS) {
  844. enabled_hosts |= BIT_MASK(remote_pid);
  845. host_count++;
  846. }
  847. }
  848. IPCLITE_OS_LOG(IPCLITE_DBG, "enabled_hosts = 0x%X", enabled_hosts);
  849. IPCLITE_OS_LOG(IPCLITE_DBG, "host_count = %u", host_count);
  850. ipcmem->toc = ipcmem->mem.virt_base;
  851. IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);
  852. ret = setup_toc(ipcmem);
  853. if (ret) {
  854. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up toc");
  855. return ret;
  856. }
  857. /*Set up host related info*/
  858. ipcmem->toc_data.host_info->hwlock_owner = IPCMEM_INVALID_HOST;
  859. ipcmem->toc_data.host_info->configured_host = enabled_hosts;
  860. gmem_offset += IPCMEM_TOC_SIZE;
  861. setup_global_partition(ipcmem, gmem_offset);
  862. gmem_offset += GLOBAL_PARTITION_SIZE;
  863. ret = setup_partitions(ipcmem, gmem_offset);
  864. if (ret) {
  865. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up partitions");
  866. return ret;
  867. }
  868. /*Making sure all writes for ipcmem configurations are completed*/
  869. wmb();
  870. ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
  871. IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
  872. return 0;
  873. }
  874. static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
  875. struct ipclite_channel *channel)
  876. {
  877. int ret = 0;
  878. u32 index;
  879. struct ipclite_irq_info *irq_info;
  880. struct device *dev;
  881. char strs[MAX_CHANNEL_SIGNALS][IPCLITE_SIGNAL_LABEL_SIZE] = {
  882. "msg", "mem-init", "version", "test", "ssr", "debug"};
  883. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  884. if (!dev)
  885. return -ENOMEM;
  886. dev->parent = parent;
  887. dev->of_node = node;
  888. dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
  889. IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
  890. ret = device_register(dev);
  891. if (ret) {
  892. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite child node\n");
  893. put_device(dev);
  894. return ret;
  895. }
  896. ret = of_property_read_u32(dev->of_node, "index",
  897. &index);
  898. if (ret) {
  899. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse index\n");
  900. goto err_dev;
  901. }
  902. irq_info = &channel->irq_info[index];
  903. IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d]=%p\n", index, irq_info);
  904. irq_info->mbox_client.dev = dev;
  905. irq_info->mbox_client.knows_txdone = true;
  906. irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0);
  907. IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan);
  908. if (IS_ERR(irq_info->mbox_chan)) {
  909. if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER)
  910. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to acquire IPC channel\n");
  911. goto err_dev;
  912. }
  913. snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]);
  914. irq_info->irq = of_irq_get(dev->of_node, 0);
  915. IPCLITE_OS_LOG(IPCLITE_DBG, "irq[%d] = %d\n", index, irq_info->irq);
  916. irq_info->signal_id = index;
  917. ret = devm_request_irq(dev, irq_info->irq,
  918. ipclite_intr,
  919. IRQF_NO_SUSPEND | IRQF_SHARED,
  920. irq_info->irqname, irq_info);
  921. if (ret) {
  922. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to request IRQ\n");
  923. goto err_dev;
  924. }
  925. IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt init completed, ret = %d\n", ret);
  926. return 0;
  927. err_dev:
  928. device_unregister(dev);
  929. kfree(dev);
  930. return ret;
  931. }
  932. int32_t get_global_partition_info(struct global_region_info *global_ipcmem)
  933. {
  934. struct ipcmem_global_partition *global_partition;
  935. /* Check added to verify ipclite is initialized */
  936. if (!ipclite) {
  937. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n");
  938. return -ENOMEM;
  939. }
  940. if (!global_ipcmem)
  941. return -EINVAL;
  942. global_partition = ipclite->ipcmem.global_partition;
  943. global_ipcmem->virt_base = (void *)((char *)global_partition +
  944. global_partition->hdr.region_offset);
  945. global_ipcmem->size = (size_t)(global_partition->hdr.region_size);
  946. IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base,
  947. global_ipcmem->size);
  948. return 0;
  949. }
  950. EXPORT_SYMBOL(get_global_partition_info);
  951. static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
  952. int remote_pid)
  953. {
  954. uint32_t p;
  955. uint32_t found = -1;
  956. for (p = 0; p < partitions; p++) {
  957. if (ipcmem.toc_data.partition_entry[p].host0 == local_pid
  958. && ipcmem.toc_data.partition_entry[p].host1 == remote_pid) {
  959. found = p;
  960. break;
  961. }
  962. }
  963. if (found < partitions)
  964. return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
  965. ipcmem.toc_data.partition_entry[found].base_offset);
  966. else
  967. return NULL;
  968. }
/* Device-core release callback for channel devices: frees the struct device
 * allocated in ipclite_channel_init() once its last reference is dropped.
 */
static void ipclite_channel_release(struct device *dev)
{
	IPCLITE_OS_LOG(IPCLITE_INFO, "Releasing ipclite channel\n");
	kfree(dev);
}
  974. /* Sets up following fields of IPCLite channel structure:
  975. * remote_pid,tx_fifo, rx_fifo
  976. */
  977. static int ipclite_channel_init(struct device *parent,
  978. struct device_node *node)
  979. {
  980. struct ipclite_fifo *rx_fifo;
  981. struct ipclite_fifo *tx_fifo;
  982. struct device *dev;
  983. u32 local_pid, remote_pid, global_atomic;
  984. u32 *descs;
  985. int ret = 0;
  986. struct device_node *child;
  987. struct ipcmem_partition_header *partition_hdr;
  988. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  989. if (!dev)
  990. return -ENOMEM;
  991. dev->parent = parent;
  992. dev->of_node = node;
  993. dev->release = ipclite_channel_release;
  994. dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
  995. IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
  996. ret = device_register(dev);
  997. if (ret) {
  998. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite device\n");
  999. put_device(dev);
  1000. kfree(dev);
  1001. return ret;
  1002. }
  1003. local_pid = LOCAL_HOST;
  1004. ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
  1005. &remote_pid);
  1006. if (ret) {
  1007. dev_err(dev, "failed to parse qcom,remote-pid\n");
  1008. goto err_put_dev;
  1009. }
  1010. IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid);
  1011. ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL);
  1012. if (!ipclite_hw_mutex) {
  1013. ret = -ENOMEM;
  1014. goto err_put_dev;
  1015. }
  1016. ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic);
  1017. if (ret) {
  1018. dev_err(dev, "failed to parse global_atomic\n");
  1019. goto err_put_dev;
  1020. }
  1021. if (global_atomic == 0)
  1022. global_atomic_support = GLOBAL_ATOMICS_DISABLED;
  1023. rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL);
  1024. tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL);
  1025. if (!rx_fifo || !tx_fifo) {
  1026. ret = -ENOMEM;
  1027. goto err_put_dev;
  1028. }
  1029. IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
  1030. partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem, local_pid, remote_pid);
  1031. IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr);
  1032. if (!partition_hdr) {
  1033. ret = -ENOMEM;
  1034. goto err_put_dev;
  1035. }
  1036. descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
  1037. IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs);
  1038. if (local_pid < remote_pid) {
  1039. tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
  1040. tx_fifo->length = partition_hdr->fifo0_size;
  1041. rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
  1042. rx_fifo->length = partition_hdr->fifo1_size;
  1043. tx_fifo->tail = &descs[0];
  1044. tx_fifo->head = &descs[1];
  1045. rx_fifo->tail = &descs[2];
  1046. rx_fifo->head = &descs[3];
  1047. } else {
  1048. tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
  1049. tx_fifo->length = partition_hdr->fifo1_size;
  1050. rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
  1051. rx_fifo->length = partition_hdr->fifo0_size;
  1052. rx_fifo->tail = &descs[0];
  1053. rx_fifo->head = &descs[1];
  1054. tx_fifo->tail = &descs[2];
  1055. tx_fifo->head = &descs[3];
  1056. }
  1057. if (partition_hdr->type == LOOPBACK_PARTITION_TYPE) {
  1058. rx_fifo->tail = tx_fifo->tail;
  1059. rx_fifo->head = tx_fifo->head;
  1060. }
  1061. /* rx_fifo->reset = ipcmem_rx_reset;*/
  1062. rx_fifo->avail = ipcmem_rx_avail;
  1063. rx_fifo->peak = ipcmem_rx_peak;
  1064. rx_fifo->advance = ipcmem_rx_advance;
  1065. /* tx_fifo->reset = ipcmem_tx_reset;*/
  1066. tx_fifo->avail = ipcmem_tx_avail;
  1067. tx_fifo->write = ipcmem_tx_write;
  1068. *rx_fifo->tail = 0;
  1069. *tx_fifo->head = 0;
  1070. /*Store Channel Information*/
  1071. ipclite->channel[remote_pid].remote_pid = remote_pid;
  1072. ipclite->channel[remote_pid].tx_fifo = tx_fifo;
  1073. ipclite->channel[remote_pid].rx_fifo = rx_fifo;
  1074. ipclite->channel[remote_pid].gstatus_ptr = &partition_hdr->status;
  1075. spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
  1076. for_each_available_child_of_node(dev->of_node, child) {
  1077. ret = ipclite_channel_irq_init(dev, child,
  1078. &ipclite->channel[remote_pid]);
  1079. if (ret) {
  1080. IPCLITE_OS_LOG(IPCLITE_ERR, "irq setup for ipclite channel failed\n");
  1081. goto err_put_dev;
  1082. }
  1083. }
  1084. /* Updating Local & Global Channel Status */
  1085. if (remote_pid == IPCMEM_APPS) {
  1086. *ipclite->channel[remote_pid].gstatus_ptr = ACTIVE;
  1087. ipclite->channel[remote_pid].status = ACTIVE;
  1088. } else {
  1089. *ipclite->channel[remote_pid].gstatus_ptr = IN_PROGRESS;
  1090. ipclite->channel[remote_pid].status = IN_PROGRESS;
  1091. }
  1092. IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret);
  1093. return ret;
  1094. err_put_dev:
  1095. ipclite->channel[remote_pid].status = INACTIVE;
  1096. device_unregister(dev);
  1097. kfree(dev);
  1098. return ret;
  1099. }
  1100. static void probe_subsystem(struct device *dev, struct device_node *np)
  1101. {
  1102. int ret = 0;
  1103. ret = ipclite_channel_init(dev, np);
  1104. if (ret)
  1105. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Channel init failed\n");
  1106. }
  1107. static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
  1108. struct kobj_attribute *attr, const char *buf, size_t count)
  1109. {
  1110. int ret = 0, host = 0;
  1111. /* Parse the string from Sysfs Interface */
  1112. ret = kstrtoint(buf, 0, &ipclite_debug_level);
  1113. if (ret < 0) {
  1114. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1115. return ret;
  1116. }
  1117. /* Check if debug structure is initialized */
  1118. if (!ipclite_dbg_info) {
  1119. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1120. return -ENOMEM;
  1121. }
  1122. /* Update the Global Debug variable for FW cores */
  1123. ipclite_dbg_info->debug_level = ipclite_debug_level;
  1124. /* Memory Barrier to make sure all writes are completed */
  1125. wmb();
  1126. /* Signal other cores for updating the debug information */
  1127. for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
  1128. if (!is_host_enabled(host))
  1129. continue;
  1130. ret = ipclite_send_debug_info(host);
  1131. if (ret < 0)
  1132. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
  1133. else
  1134. IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
  1135. }
  1136. return count;
  1137. }
  1138. static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
  1139. struct kobj_attribute *attr, const char *buf, size_t count)
  1140. {
  1141. int ret = 0, host = 0;
  1142. /* Parse the string from Sysfs Interface */
  1143. ret = kstrtoint(buf, 0, &ipclite_debug_control);
  1144. if (ret < 0) {
  1145. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1146. return ret;
  1147. }
  1148. /* Check if debug structures are initialized */
  1149. if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
  1150. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1151. return -ENOMEM;
  1152. }
  1153. /* Update the Global Debug variable for FW cores */
  1154. ipclite_dbg_info->debug_control = ipclite_debug_control;
  1155. /* Memory Barrier to make sure all writes are completed */
  1156. wmb();
  1157. /* Signal other cores for updating the debug information */
  1158. for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
  1159. if (!is_host_enabled(host))
  1160. continue;
  1161. ret = ipclite_send_debug_info(host);
  1162. if (ret < 0)
  1163. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n", host);
  1164. else
  1165. IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
  1166. }
  1167. return count;
  1168. }
  1169. static ssize_t ipclite_dbg_dump_write(struct kobject *kobj,
  1170. struct kobj_attribute *attr, const char *buf, size_t count)
  1171. {
  1172. int ret = 0;
  1173. /* Parse the string from Sysfs Interface */
  1174. ret = kstrtoint(buf, 0, &ipclite_debug_dump);
  1175. if (ret < 0) {
  1176. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1177. return ret;
  1178. }
  1179. /* Check if debug structures are initialized */
  1180. if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
  1181. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1182. return -ENOMEM;
  1183. }
  1184. /* Dump the debug information */
  1185. if (ipclite_debug_dump & IPCLITE_DUMP_DBG_STRUCT)
  1186. ipclite_dump_debug_struct();
  1187. if (ipclite_debug_dump & IPCLITE_DUMP_INMEM_LOG)
  1188. ipclite_dump_inmem_logs();
  1189. return count;
  1190. }
/* Sysfs attributes exposed under /sys/kernel/ipclite/.
 * All are write-only (mode 0660, no show op); the store handlers parse
 * the written value and propagate it to remote cores where applicable.
 */
struct kobj_attribute sysfs_dbg_lvl = __ATTR(ipclite_debug_level, 0660,
					NULL, ipclite_dbg_lvl_write);
struct kobj_attribute sysfs_dbg_ctrl = __ATTR(ipclite_debug_control, 0660,
					NULL, ipclite_dbg_ctrl_write);
struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660,
					NULL, ipclite_dbg_dump_write);
  1197. static int ipclite_debug_sysfs_setup(void)
  1198. {
  1199. int ret = 0;
  1200. /* Creating a directory in /sys/kernel/ */
  1201. sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj);
  1202. if (!sysfs_kobj) {
  1203. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n");
  1204. return -ENOMEM;
  1205. }
  1206. /* Creating sysfs files/interfaces for debug */
  1207. ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr);
  1208. if (ret) {
  1209. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n");
  1210. return ret;
  1211. }
  1212. ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr);
  1213. if (ret) {
  1214. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n");
  1215. return ret;
  1216. }
  1217. ret = sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr);
  1218. if (ret) {
  1219. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n");
  1220. return ret;
  1221. }
  1222. return ret;
  1223. }
/*
 * ipclite_debug_info_setup() - Carve the debug structures out of the last
 * DEBUG_PARTITION_SIZE bytes of IPC memory.
 *
 * Layout within the debug partition:
 *   ipclite_dbg_info   - shared debug control block at the start;
 *   ipclite_dbg_struct - this host's per-host struct, IPCMEM_APPS
 *                        struct-sizes past the info block;
 *   ipclite_dbg_inmem  - in-memory log buffer, past all IPCMEM_NUM_HOSTS
 *                        per-host structs.
 *
 * NOTE(review): the NULL checks below cannot fire in practice - adding an
 * offset to a valid mapping never yields NULL; kept as-is (no code change
 * in this edit).
 */
static int ipclite_debug_info_setup(void)
{
	/* Setting up the Debug Structures */
	ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base +
				ipclite->ipcmem.mem.size) - DEBUG_PARTITION_SIZE);
	if (!ipclite_dbg_info)
		return -EADDRNOTAVAIL;

	ipclite_dbg_struct = (struct ipclite_debug_struct *)
				(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
				(sizeof(*ipclite_dbg_struct) * IPCMEM_APPS));
	if (!ipclite_dbg_struct)
		return -EADDRNOTAVAIL;

	ipclite_dbg_inmem = (struct ipclite_debug_inmem_buf *)
				(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
				(sizeof(*ipclite_dbg_struct) * IPCMEM_NUM_HOSTS));
	if (!ipclite_dbg_inmem)
		return -EADDRNOTAVAIL;

	IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n",
			ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, DEBUG_PARTITION_SIZE);
	IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n",
			ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem);

	return 0;
}
  1247. static int ipclite_probe(struct platform_device *pdev)
  1248. {
  1249. int ret = 0;
  1250. int hwlock_id;
  1251. struct ipcmem_region *mem;
  1252. struct device_node *cn;
  1253. struct device_node *pn = pdev->dev.of_node;
  1254. struct ipclite_channel broadcast;
  1255. ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL);
  1256. if (!ipclite) {
  1257. ret = -ENOMEM;
  1258. goto error;
  1259. }
  1260. ipclite->dev = &pdev->dev;
  1261. hwlock_id = of_hwspin_lock_get_id(pn, 0);
  1262. if (hwlock_id < 0) {
  1263. if (hwlock_id != -EPROBE_DEFER)
  1264. dev_err(&pdev->dev, "failed to retrieve hwlock\n");
  1265. ret = hwlock_id;
  1266. goto release;
  1267. }
  1268. IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id);
  1269. ipclite->hwlock = hwspin_lock_request_specific(hwlock_id);
  1270. if (!ipclite->hwlock) {
  1271. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n");
  1272. ret = -ENXIO;
  1273. goto release;
  1274. }
  1275. IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n",
  1276. ipclite->hwlock);
  1277. /* Initializing Local Mutex Lock for SSR functionality */
  1278. mutex_init(&ssr_mutex);
  1279. ret = map_ipcmem(ipclite, "memory-region");
  1280. if (ret) {
  1281. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to map ipcmem\n");
  1282. goto release;
  1283. }
  1284. mem = &(ipclite->ipcmem.mem);
  1285. memset(mem->virt_base, 0, mem->size);
  1286. ret = ipcmem_init(&ipclite->ipcmem, pn);
  1287. if (ret) {
  1288. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to set up IPCMEM");
  1289. goto release;
  1290. }
  1291. /* Set up sysfs for debug */
  1292. ret = ipclite_debug_sysfs_setup();
  1293. if (ret) {
  1294. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n");
  1295. goto release;
  1296. }
  1297. /* Mapping Debug Memory */
  1298. ret = ipclite_debug_info_setup();
  1299. if (ret) {
  1300. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n");
  1301. goto release;
  1302. }
  1303. /* Setup Channel for each Remote Subsystem */
  1304. for_each_available_child_of_node(pn, cn)
  1305. probe_subsystem(&pdev->dev, cn);
  1306. /* Broadcast init_done signal to all subsystems once mbox channels
  1307. * are set up
  1308. */
  1309. broadcast = ipclite->channel[IPCMEM_APPS];
  1310. ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan,
  1311. NULL);
  1312. if (ret < 0)
  1313. goto mem_release;
  1314. mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
  1315. if (global_atomic_support) {
  1316. ipclite->ipcmem.toc->hdr.feature_mask |= GLOBAL_ATOMIC_SUPPORT_BMSK;
  1317. }
  1318. IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n", global_atomic_support);
  1319. /* Should be called after all Global TOC related init is done */
  1320. insert_magic_number();
  1321. /* hw mutex callbacks */
  1322. ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
  1323. ipclite_hw_mutex->release = ipclite_hw_mutex_release;
  1324. /* store to ipclite structure */
  1325. ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
  1326. /* Update the Global Debug variable for FW cores */
  1327. ipclite_dbg_info->debug_level = ipclite_debug_level;
  1328. ipclite_dbg_info->debug_control = ipclite_debug_control;
  1329. IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite probe completed successfully\n");
  1330. return ret;
  1331. mem_release:
  1332. /* If the remote subsystem has already completed the init and actively
  1333. * using IPCMEM, re-assigning IPCMEM memory back to HLOS can lead to crash
  1334. * Solution: Either we don't take back the memory or make sure APPS completes
  1335. * init before any other subsystem initializes IPCLite (we won't have to send
  1336. * braodcast)
  1337. */
  1338. release:
  1339. kfree(ipclite);
  1340. ipclite = NULL;
  1341. error:
  1342. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n");
  1343. return ret;
  1344. }
/* DT match table: binds this driver to "qcom,ipclite" nodes */
static const struct of_device_id ipclite_of_match[] = {
	{ .compatible = "qcom,ipclite"},
	{}
};
MODULE_DEVICE_TABLE(of, ipclite_of_match);

/* Platform driver registration; no remove op is provided, so the module
 * is effectively load-once.
 */
static struct platform_driver ipclite_driver = {
	.probe = ipclite_probe,
	.driver = {
		.name = "ipclite",
		.of_match_table = ipclite_of_match,
	},
};
module_platform_driver(ipclite_driver);

MODULE_DESCRIPTION("IPCLite Driver");
MODULE_LICENSE("GPL v2");
/* ensure the hwspinlock provider loads before us (probe needs it) */
MODULE_SOFTDEP("pre: qcom_hwspinlock");