ipclite.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
  6. #include <linux/module.h>
  7. #include <linux/irq.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/irqdomain.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/of_irq.h>
  14. #include <asm/memory.h>
  15. #include <linux/sizes.h>
  16. #include <linux/hwspinlock.h>
  17. #include <linux/qcom_scm.h>
  18. #include <linux/sysfs.h>
  19. #include "ipclite_client.h"
  20. #include "ipclite.h"
  21. #define GLOBAL_ATOMICS_ENABLED 1
  22. #define GLOBAL_ATOMICS_DISABLED 0
  23. #define FIFO_FULL_RESERVE 8
  24. #define FIFO_ALIGNMENT 8
  25. static struct ipclite_info *ipclite;
  26. static struct ipclite_client synx_client;
  27. static struct ipclite_client test_client;
  28. static struct ipclite_hw_mutex_ops *ipclite_hw_mutex;
  29. static struct ipclite_debug_info *ipclite_dbg_info;
  30. static struct ipclite_debug_struct *ipclite_dbg_struct;
  31. static struct ipclite_debug_inmem_buf *ipclite_dbg_inmem;
  32. static struct mutex ssr_mutex;
  33. static struct kobject *sysfs_kobj;
  34. static uint32_t channel_status_info[IPCMEM_NUM_HOSTS];
  35. static u32 global_atomic_support = GLOBAL_ATOMICS_ENABLED;
  36. static uint32_t ipclite_debug_level = IPCLITE_ERR | IPCLITE_WARN | IPCLITE_INFO;
  37. static uint32_t ipclite_debug_control = IPCLITE_DMESG_LOG, ipclite_debug_dump;
  38. static void IPCLITE_OS_INMEM_LOG(const char *psztStr, ...)
  39. {
  40. uint32_t local_index = 0;
  41. va_list pArgs;
  42. va_start(pArgs, psztStr);
  43. /* Incrementing the index atomically and storing the index in local variable */
  44. local_index = ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
  45. &ipclite_dbg_info->debug_log_index);
  46. local_index %= IPCLITE_LOG_BUF_SIZE;
  47. /* Writes data on the index location */
  48. vsnprintf(ipclite_dbg_inmem->IPCLITELog[local_index], IPCLITE_LOG_MSG_SIZE, psztStr, pArgs);
  49. va_end(pArgs);
  50. }
/*
 * ipclite_dump_debug_struct - dump per-host IPCLite debug counters to dmesg
 *
 * For every configured host, prints the overall signal counters plus the
 * per-peer message counts and the current/previous TX/RX FIFO indices.
 * Requires ipclite_dbg_info/ipclite_dbg_struct to be initialized and the
 * IPCLITE_DBG_STRUCT bit set in ipclite_debug_control; otherwise it logs
 * an error and returns without dumping.
 */
static void ipclite_dump_debug_struct(void)
{
	int i, host;
	struct ipclite_debug_struct *temp_dbg_struct;

	/* Check if debug structures are initialized */
	if (!ipclite_dbg_info || !ipclite_dbg_struct) {
		pr_err("Debug Structures not initialized\n");
		return;
	}
	/* Check if debug structures are enabled before printing */
	if (!(ipclite_debug_control & IPCLITE_DBG_STRUCT)) {
		pr_err("Debug Structures not enabled\n");
		return;
	}
	/* Dumping the debug structures */
	pr_info("------------------- Dumping IPCLite Debug Structure -------------------\n");
	for (host = 0; host < IPCMEM_NUM_HOSTS; host++) {
		if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
			/* Per-host records are laid out contiguously after the base */
			temp_dbg_struct = (struct ipclite_debug_struct *)
						(((char *)ipclite_dbg_struct) +
						(sizeof(*temp_dbg_struct) * host));
			pr_info("---------- Host ID: %d dbg_mem:%p ----------\n",
					host, temp_dbg_struct);
			pr_info("Total Signals Sent : %d Total Signals Received : %d\n",
					temp_dbg_struct->dbg_info_overall.total_numsig_sent,
					temp_dbg_struct->dbg_info_overall.total_numsig_recv);
			pr_info("Last Signal Sent to Host ID : %d Last Signal Received from Host ID : %d\n",
					temp_dbg_struct->dbg_info_overall.last_sent_host_id,
					temp_dbg_struct->dbg_info_overall.last_recv_host_id);
			pr_info("Last Signal ID Sent : %d Last Signal ID Received : %d\n",
					temp_dbg_struct->dbg_info_overall.last_sigid_sent,
					temp_dbg_struct->dbg_info_overall.last_sigid_recv);
			/* Per-peer channel state and FIFO index history */
			for (i = 0; i < IPCMEM_NUM_HOSTS; i++) {
				if (ipclite->ipcmem.toc->recovery.configured_core[i]) {
					pr_info("----------> Host ID : %d Host ID : %d Channel State: %d\n",
						host, i, ipclite->ipcmem.toc->toc_entry[host][i].status);
					pr_info("No. of Messages Sent : %d No. of Messages Received : %d\n",
						temp_dbg_struct->dbg_info_host[i].numsig_sent,
						temp_dbg_struct->dbg_info_host[i].numsig_recv);
					pr_info("No. of Interrupts Received : %d\n",
						temp_dbg_struct->dbg_info_host[i].num_intr);
					pr_info("TX Write Index : %d TX Read Index : %d\n",
						temp_dbg_struct->dbg_info_host[i].tx_wr_index,
						temp_dbg_struct->dbg_info_host[i].tx_rd_index);
					pr_info("TX Write Index[0] : %d TX Read Index[0] : %d\n",
						temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[0],
						temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[0]);
					pr_info("TX Write Index[1] : %d TX Read Index[1] : %d\n",
						temp_dbg_struct->dbg_info_host[i].prev_tx_wr_index[1],
						temp_dbg_struct->dbg_info_host[i].prev_tx_rd_index[1]);
					pr_info("RX Write Index : %d RX Read Index : %d\n",
						temp_dbg_struct->dbg_info_host[i].rx_wr_index,
						temp_dbg_struct->dbg_info_host[i].rx_rd_index);
					pr_info("RX Write Index[0] : %d RX Read Index[0] : %d\n",
						temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[0],
						temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[0]);
					pr_info("RX Write Index[1] : %d RX Read Index[1] : %d\n",
						temp_dbg_struct->dbg_info_host[i].prev_rx_wr_index[1],
						temp_dbg_struct->dbg_info_host[i].prev_rx_rd_index[1]);
				}
			}
		}
	}
	return;
}
  116. static void ipclite_dump_inmem_logs(void)
  117. {
  118. int i;
  119. uint32_t local_index = 0;
  120. /* Check if debug and inmem structures are initialized */
  121. if (!ipclite_dbg_info || !ipclite_dbg_inmem) {
  122. pr_err("Debug structures not initialized\n");
  123. return;
  124. }
  125. /* Check if debug structures are enabled before printing */
  126. if (!(ipclite_debug_control & IPCLITE_INMEM_LOG)) {
  127. pr_err("In-Memory Logs not enabled\n");
  128. return;
  129. }
  130. /* Dumping the debug in-memory logs */
  131. pr_info("------------------- Dumping In-Memory Logs -------------------\n");
  132. /* Storing the index atomically in local variable */
  133. local_index = ipclite_global_atomic_load_u32((ipclite_atomic_uint32_t *)
  134. &ipclite_dbg_info->debug_log_index);
  135. /* Printing from current index till the end of buffer */
  136. for (i = local_index % IPCLITE_LOG_BUF_SIZE; i < IPCLITE_LOG_BUF_SIZE; i++) {
  137. if (ipclite_dbg_inmem->IPCLITELog[i][0])
  138. pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
  139. }
  140. /* Printing from 0th index to current-1 index */
  141. for (i = 0; i < local_index % IPCLITE_LOG_BUF_SIZE; i++) {
  142. if (ipclite_dbg_inmem->IPCLITELog[i][0])
  143. pr_info("%s\n", ipclite_dbg_inmem->IPCLITELog[i]);
  144. }
  145. return;
  146. }
  147. static void ipclite_hw_mutex_acquire(void)
  148. {
  149. int32_t ret;
  150. if (ipclite != NULL) {
  151. if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
  152. ret = hwspin_lock_timeout_irqsave(ipclite->hwlock,
  153. HWSPINLOCK_TIMEOUT,
  154. &ipclite->ipclite_hw_mutex->flags);
  155. if (ret) {
  156. IPCLITE_OS_LOG(IPCLITE_ERR, "Hw mutex lock acquire failed\n");
  157. return;
  158. }
  159. ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_APPS;
  160. IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock acquired\n");
  161. }
  162. }
  163. }
  164. static void ipclite_hw_mutex_release(void)
  165. {
  166. if (ipclite != NULL) {
  167. if (!ipclite->ipcmem.toc->ipclite_features.global_atomic_support) {
  168. ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner =
  169. IPCMEM_INVALID_HOST;
  170. hwspin_unlock_irqrestore(ipclite->hwlock,
  171. &ipclite->ipclite_hw_mutex->flags);
  172. IPCLITE_OS_LOG(IPCLITE_DBG, "Hw mutex lock release\n");
  173. }
  174. }
  175. }
/*
 * ipclite_atomic_init_u32 - initialize a shared unsigned atomic to @data
 * @addr: atomic variable in shared memory
 * @data: initial value
 *
 * Plain non-locked initialization; callers must not race with users of @addr.
 */
void ipclite_atomic_init_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
{
	atomic_set(addr, data);
}
EXPORT_SYMBOL(ipclite_atomic_init_u32);
/*
 * ipclite_atomic_init_i32 - initialize a shared signed atomic to @data
 * @addr: atomic variable in shared memory
 * @data: initial value
 *
 * Plain non-locked initialization; callers must not race with users of @addr.
 */
void ipclite_atomic_init_i32(ipclite_atomic_int32_t *addr, int32_t data)
{
	atomic_set(addr, data);
}
EXPORT_SYMBOL(ipclite_atomic_init_i32);
/*
 * ipclite_global_atomic_store_u32 - store @data into a shared atomic
 * @addr: atomic variable in shared memory
 * @data: value to store
 *
 * Wrapped in the HW mutex callbacks, which fall back to the hardware
 * spinlock when the platform lacks cross-core atomic support.
 */
void ipclite_global_atomic_store_u32(ipclite_atomic_uint32_t *addr, uint32_t data)
{
	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	atomic_set(addr, data);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
}
EXPORT_SYMBOL(ipclite_global_atomic_store_u32);
/*
 * ipclite_global_atomic_store_i32 - store @data into a shared signed atomic
 * @addr: atomic variable in shared memory
 * @data: value to store
 *
 * Wrapped in the HW mutex callbacks, which fall back to the hardware
 * spinlock when the platform lacks cross-core atomic support.
 */
void ipclite_global_atomic_store_i32(ipclite_atomic_int32_t *addr, int32_t data)
{
	/* callback to acquire hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->acquire();
	atomic_set(addr, data);
	/* callback to release hw mutex lock if atomic support is not enabled */
	ipclite->ipclite_hw_mutex->release();
}
EXPORT_SYMBOL(ipclite_global_atomic_store_i32);
  204. uint32_t ipclite_global_atomic_load_u32(ipclite_atomic_uint32_t *addr)
  205. {
  206. uint32_t ret;
  207. /* callback to acquire hw mutex lock if atomic support is not enabled */
  208. ipclite->ipclite_hw_mutex->acquire();
  209. ret = atomic_read(addr);
  210. /* callback to release hw mutex lock if atomic support is not enabled */
  211. ipclite->ipclite_hw_mutex->release();
  212. return ret;
  213. }
  214. EXPORT_SYMBOL(ipclite_global_atomic_load_u32);
  215. int32_t ipclite_global_atomic_load_i32(ipclite_atomic_int32_t *addr)
  216. {
  217. int32_t ret;
  218. /* callback to acquire hw mutex lock if atomic support is not enabled */
  219. ipclite->ipclite_hw_mutex->acquire();
  220. ret = atomic_read(addr);
  221. /* callback to release hw mutex lock if atomic support is not enabled */
  222. ipclite->ipclite_hw_mutex->release();
  223. return ret;
  224. }
  225. EXPORT_SYMBOL(ipclite_global_atomic_load_i32);
  226. uint32_t ipclite_global_test_and_set_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
  227. {
  228. uint32_t ret;
  229. uint32_t mask = (1 << nr);
  230. /* callback to acquire hw mutex lock if atomic support is not enabled */
  231. ipclite->ipclite_hw_mutex->acquire();
  232. ret = atomic_fetch_or(mask, addr);
  233. /* callback to release hw mutex lock if atomic support is not enabled */
  234. ipclite->ipclite_hw_mutex->release();
  235. return ret;
  236. }
  237. EXPORT_SYMBOL(ipclite_global_test_and_set_bit);
  238. uint32_t ipclite_global_test_and_clear_bit(uint32_t nr, ipclite_atomic_uint32_t *addr)
  239. {
  240. uint32_t ret;
  241. uint32_t mask = (1 << nr);
  242. /* callback to acquire hw mutex lock if atomic support is not enabled */
  243. ipclite->ipclite_hw_mutex->acquire();
  244. ret = atomic_fetch_and(~mask, addr);
  245. /* callback to release hw mutex lock if atomic support is not enabled */
  246. ipclite->ipclite_hw_mutex->release();
  247. return ret;
  248. }
  249. EXPORT_SYMBOL(ipclite_global_test_and_clear_bit);
  250. int32_t ipclite_global_atomic_inc(ipclite_atomic_int32_t *addr)
  251. {
  252. int32_t ret = 0;
  253. /* callback to acquire hw mutex lock if atomic support is not enabled */
  254. ipclite->ipclite_hw_mutex->acquire();
  255. ret = atomic_fetch_add(1, addr);
  256. /* callback to release hw mutex lock if atomic support is not enabled */
  257. ipclite->ipclite_hw_mutex->release();
  258. return ret;
  259. }
  260. EXPORT_SYMBOL(ipclite_global_atomic_inc);
  261. int32_t ipclite_global_atomic_dec(ipclite_atomic_int32_t *addr)
  262. {
  263. int32_t ret = 0;
  264. /* callback to acquire hw mutex lock if atomic support is not enabled */
  265. ipclite->ipclite_hw_mutex->acquire();
  266. ret = atomic_fetch_sub(1, addr);
  267. /* callback to release hw mutex lock if atomic support is not enabled */
  268. ipclite->ipclite_hw_mutex->release();
  269. return ret;
  270. }
  271. EXPORT_SYMBOL(ipclite_global_atomic_dec);
  272. static size_t ipcmem_rx_avail(struct ipclite_fifo *rx_fifo)
  273. {
  274. size_t len;
  275. u32 head;
  276. u32 tail;
  277. head = le32_to_cpu(*rx_fifo->head);
  278. tail = le32_to_cpu(*rx_fifo->tail);
  279. IPCLITE_OS_LOG(IPCLITE_DBG, "head=%d, tail=%d\n", head, tail);
  280. if (head < tail)
  281. len = rx_fifo->length - tail + head;
  282. else
  283. len = head - tail;
  284. if (WARN_ON_ONCE(len > rx_fifo->length))
  285. len = 0;
  286. IPCLITE_OS_LOG(IPCLITE_DBG, "len=%d\n", len);
  287. return len;
  288. }
/*
 * ipcmem_rx_peak - copy @count bytes out of the RX FIFO without consuming
 * @rx_fifo: FIFO descriptor holding the shared tail index
 * @data:    destination buffer (at least @count bytes)
 * @count:   number of bytes to copy
 *
 * Reads from the current tail position, splitting the copy in two when the
 * requested span wraps past the end of the ring. Does not advance the tail;
 * see ipcmem_rx_advance(). Bails out (with a one-shot warning) on a
 * corrupted tail index.
 */
static void ipcmem_rx_peak(struct ipclite_fifo *rx_fifo,
			   void *data, size_t count)
{
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*rx_fifo->tail);
	/* Corrupted read index: refuse to touch the FIFO */
	if (WARN_ON_ONCE(tail > rx_fifo->length))
		return;
	/* tail == length is a legal wrap point; fold it back to 0 */
	if (tail >= rx_fifo->length)
		tail -= rx_fifo->length;
	/* First chunk: from tail to the end of the ring (or all of @count) */
	len = min_t(size_t, count, rx_fifo->length - tail);
	if (len)
		memcpy_fromio(data, rx_fifo->fifo + tail, len);
	/* Second chunk: wrapped remainder from the start of the ring */
	if (len != count)
		memcpy_fromio(data + len, rx_fifo->fifo, (count - len));
}
/*
 * ipcmem_rx_advance - consume @count bytes from the RX FIFO
 * @rx_fifo: FIFO descriptor holding the shared tail index
 * @count:   number of bytes to mark as read
 * @core_id: remote host the data came from (indexes the debug records)
 *
 * Advances the shared tail index (with wrap-around) and, when
 * IPCLITE_DBG_STRUCT is enabled, rotates the two-deep index history and
 * bumps the receive counters in the shared debug structure.
 */
static void ipcmem_rx_advance(struct ipclite_fifo *rx_fifo,
			      size_t count, uint32_t core_id)
{
	u32 tail;

	tail = le32_to_cpu(*rx_fifo->tail);
	tail += count;
	if (tail >= rx_fifo->length)
		tail %= rx_fifo->length;
	*rx_fifo->tail = cpu_to_le32(tail);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		/* Shift RX write-index history: [0] -> [1], current -> [0] */
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_wr_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index;
		ipclite_dbg_struct->dbg_info_host[core_id].rx_wr_index = *rx_fifo->head;
		/* Shift RX read-index history the same way */
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_rx_rd_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index;
		ipclite_dbg_struct->dbg_info_host[core_id].rx_rd_index = *rx_fifo->tail;
		ipclite_dbg_struct->dbg_info_overall.total_numsig_recv++;
		ipclite_dbg_struct->dbg_info_host[core_id].numsig_recv++;
	}
}
/*
 * ipcmem_tx_avail - free space in the TX FIFO, in bytes
 * @tx_fifo: FIFO descriptor holding the shared head/tail index pointers
 *
 * Computes the writable span between head (writer) and tail (reader),
 * keeping FIFO_FULL_RESERVE bytes back so head never catches tail (a
 * full ring would be indistinguishable from an empty one). Returns 0
 * (with a one-shot warning) on corrupted indices.
 */
static size_t ipcmem_tx_avail(struct ipclite_fifo *tx_fifo)
{
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*tx_fifo->head);
	tail = le32_to_cpu(*tx_fifo->tail);
	if (tail <= head)
		avail = tx_fifo->length - head + tail;
	else
		avail = tail - head;
	/* Keep a reserve so the ring never appears completely full */
	if (avail < FIFO_FULL_RESERVE)
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE;
	/* Guard against corrupted shared indices */
	if (WARN_ON_ONCE(avail > tx_fifo->length))
		avail = 0;
	return avail;
}
/*
 * ipcmem_tx_write_one - copy one message into the TX ring at @head
 * @tx_fifo: FIFO descriptor
 * @head:    current write offset into the ring
 * @data:    message payload
 * @count:   payload length in bytes
 *
 * Splits the copy in two when the payload wraps past the end of the ring.
 * Returns the new head offset (wrapped); the shared head pointer itself is
 * updated by the caller (ipcmem_tx_write) after a write barrier. Returns
 * @head unchanged (with a one-shot warning) when @head is corrupted.
 */
static unsigned int ipcmem_tx_write_one(struct ipclite_fifo *tx_fifo,
					unsigned int head,
					const void *data, size_t count)
{
	size_t len;

	if (WARN_ON_ONCE(head > tx_fifo->length))
		return head;
	/* First chunk: from head to the end of the ring (or all of @count) */
	len = min_t(size_t, count, tx_fifo->length - head);
	if (len)
		memcpy(tx_fifo->fifo + head, data, len);
	/* Second chunk: wrapped remainder at the start of the ring */
	if (len != count)
		memcpy(tx_fifo->fifo, data + len, count - len);
	head += count;
	if (head >= tx_fifo->length)
		head -= tx_fifo->length;
	return head;
}
/*
 * ipcmem_tx_write - append a message to the TX FIFO and publish it
 * @tx_fifo:   FIFO descriptor
 * @data:      message payload
 * @dlen:      payload length in bytes
 * @core_id:   destination host (indexes the debug records)
 * @signal_id: signal used for the notification (logged only)
 *
 * Copies the payload into the ring, aligns the new head to 8 bytes, and
 * publishes it with a write barrier so the remote side never observes the
 * head update before the payload. When IPCLITE_DBG_STRUCT is enabled,
 * rotates the two-deep TX index history and bumps the send counters.
 */
static void ipcmem_tx_write(struct ipclite_fifo *tx_fifo,
		const void *data, size_t dlen, uint32_t core_id, uint32_t signal_id)
{
	unsigned int head;

	head = le32_to_cpu(*tx_fifo->head);
	head = ipcmem_tx_write_one(tx_fifo, head, data, dlen);
	/* Messages are kept 8-byte aligned in the ring */
	head = ALIGN(head, 8);
	if (head >= tx_fifo->length)
		head -= tx_fifo->length;
	/* Ensure ordering of fifo and head update */
	wmb();
	*tx_fifo->head = cpu_to_le32(head);
	IPCLITE_OS_LOG(IPCLITE_DBG, "head : %d core_id : %d signal_id : %d\n",
				*tx_fifo->head, core_id, signal_id);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		/* Shift TX write-index history: [0] -> [1], current -> [0] */
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_wr_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index;
		ipclite_dbg_struct->dbg_info_host[core_id].tx_wr_index = *tx_fifo->head;
		/* Shift TX read-index history the same way */
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[1] =
			ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0];
		ipclite_dbg_struct->dbg_info_host[core_id].prev_tx_rd_index[0] =
			ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index;
		ipclite_dbg_struct->dbg_info_host[core_id].tx_rd_index = *tx_fifo->tail;
		ipclite_dbg_struct->dbg_info_overall.total_numsig_sent++;
		ipclite_dbg_struct->dbg_info_host[core_id].numsig_sent++;
		ipclite_dbg_struct->dbg_info_overall.last_sent_host_id = core_id;
		ipclite_dbg_struct->dbg_info_overall.last_sigid_sent = signal_id;
	}
}
/* Channel-level wrapper: bytes pending in this channel's RX FIFO. */
static size_t ipclite_rx_avail(struct ipclite_channel *channel)
{
	return channel->rx_fifo->avail(channel->rx_fifo);
}
/* Channel-level wrapper: copy @count bytes from the RX FIFO without consuming. */
static void ipclite_rx_peak(struct ipclite_channel *channel,
			    void *data, size_t count)
{
	channel->rx_fifo->peak(channel->rx_fifo, data, count);
}
/* Channel-level wrapper: consume @count bytes; remote_pid tags debug records. */
static void ipclite_rx_advance(struct ipclite_channel *channel,
			       size_t count)
{
	channel->rx_fifo->advance(channel->rx_fifo, count, channel->remote_pid);
}
/* Channel-level wrapper: free bytes in this channel's TX FIFO. */
static size_t ipclite_tx_avail(struct ipclite_channel *channel)
{
	return channel->tx_fifo->avail(channel->tx_fifo);
}
/* Channel-level wrapper: append @dlen bytes to the TX FIFO and publish. */
static void ipclite_tx_write(struct ipclite_channel *channel,
			     const void *data, size_t dlen)
{
	channel->tx_fifo->write(channel->tx_fifo, data, dlen, channel->remote_pid,
				channel->irq_info->signal_id);
}
  422. static int ipclite_rx_data(struct ipclite_channel *channel, size_t avail)
  423. {
  424. uint64_t data;
  425. int ret = 0;
  426. if (avail < sizeof(data)) {
  427. IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
  428. channel->remote_pid, channel->irq_info->signal_id);
  429. return -EAGAIN;
  430. }
  431. ipclite_rx_peak(channel, &data, sizeof(data));
  432. if (synx_client.reg_complete == 1) {
  433. if (synx_client.callback)
  434. synx_client.callback(channel->remote_pid, data,
  435. synx_client.priv_data);
  436. }
  437. ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
  438. return ret;
  439. }
  440. static int ipclite_rx_test_data(struct ipclite_channel *channel, size_t avail)
  441. {
  442. uint64_t data;
  443. int ret = 0;
  444. if (avail < sizeof(data)) {
  445. IPCLITE_OS_LOG(IPCLITE_ERR, "Not enough data in fifo, Core : %d Signal : %d\n",
  446. channel->remote_pid, channel->irq_info->signal_id);
  447. return -EAGAIN;
  448. }
  449. ipclite_rx_peak(channel, &data, sizeof(data));
  450. if (test_client.reg_complete == 1) {
  451. if (test_client.callback)
  452. test_client.callback(channel->remote_pid, data,
  453. test_client.priv_data);
  454. }
  455. ipclite_rx_advance(channel, ALIGN(sizeof(data), 8));
  456. return ret;
  457. }
/*
 * ipclite_intr - interrupt handler for all per-signal IPCLite IRQs
 * @irq:  IRQ number (unused; identity comes from @data)
 * @data: the ipclite_irq_info this IRQ was registered with
 *
 * Recovers the owning channel from the irq_info array slot, records debug
 * counters when enabled, then drains the RX FIFO: message and test signals
 * dispatch every complete 8-byte message to their client; the version
 * signal is currently a no-op; anything else is logged as an error.
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t ipclite_intr(int irq, void *data)
{
	struct ipclite_channel *channel;
	struct ipclite_irq_info *irq_info;
	unsigned int avail = 0;
	int ret = 0;
	uint64_t msg;

	irq_info = (struct ipclite_irq_info *)data;
	/* irq_info is element [signal_id] of the channel's irq_info array */
	channel = container_of(irq_info, struct ipclite_channel, irq_info[irq_info->signal_id]);
	IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt received from Core : %d Signal : %d\n",
						channel->remote_pid, irq_info->signal_id);
	/* Storing the debug data in debug structures */
	if (ipclite_debug_control & IPCLITE_DBG_STRUCT) {
		ipclite_dbg_struct->dbg_info_host[channel->remote_pid].num_intr++;
		ipclite_dbg_struct->dbg_info_overall.last_recv_host_id = channel->remote_pid;
		ipclite_dbg_struct->dbg_info_overall.last_sigid_recv = irq_info->signal_id;
	}
	if (irq_info->signal_id == IPCLITE_MSG_SIGNAL) {
		/* Drain all complete messages queued for the synx client */
		for (;;) {
			avail = ipclite_rx_avail(channel);
			if (avail < sizeof(msg))
				break;
			ret = ipclite_rx_data(channel, avail);
		}
		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
	} else if (irq_info->signal_id == IPCLITE_VERSION_SIGNAL) {
		IPCLITE_OS_LOG(IPCLITE_DBG, "Versioning is currently not enabled\n");
	} else if (irq_info->signal_id == IPCLITE_TEST_SIGNAL) {
		/* Drain all complete messages queued for the test client */
		for (;;) {
			avail = ipclite_rx_avail(channel);
			if (avail < sizeof(msg))
				break;
			ret = ipclite_rx_test_data(channel, avail);
		}
		IPCLITE_OS_LOG(IPCLITE_DBG, "checking messages in rx_fifo done\n");
	} else {
		IPCLITE_OS_LOG(IPCLITE_ERR, "Wrong Interrupt Signal from core : %d signal : %d\n",
					channel->remote_pid, irq_info->signal_id);
	}
	return IRQ_HANDLED;
}
  499. static int ipclite_tx(struct ipclite_channel *channel,
  500. uint64_t data, size_t dlen, uint32_t ipclite_signal)
  501. {
  502. unsigned long flags;
  503. int ret = 0;
  504. spin_lock_irqsave(&channel->tx_lock, flags);
  505. if (ipclite_tx_avail(channel) < dlen) {
  506. spin_unlock_irqrestore(&channel->tx_lock, flags);
  507. ret = -EAGAIN;
  508. return ret;
  509. }
  510. ipclite_tx_write(channel, &data, dlen);
  511. mbox_send_message(channel->irq_info[ipclite_signal].mbox_chan, NULL);
  512. mbox_client_txdone(channel->irq_info[ipclite_signal].mbox_chan, 0);
  513. spin_unlock_irqrestore(&channel->tx_lock, flags);
  514. return ret;
  515. }
/*
 * ipclite_send_debug_info - ring the debug-signal doorbell on a remote core
 * @proc_id: destination host id (0 .. IPCMEM_NUM_HOSTS-1)
 *
 * Refreshes the cached channel status from the shared TOC if needed and
 * refuses to send to an inactive core. Returns IPCLITE_SUCCESS, -EINVAL
 * for a bad @proc_id, -IPCLITE_EINCHAN for an inactive channel, or
 * -IPCLITE_FAILURE when the mailbox send fails.
 */
static int ipclite_send_debug_info(int32_t proc_id)
{
	int ret = 0;

	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
		return -EINVAL;
	}
	/* Cached status may be stale; re-check the shared TOC before failing */
	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
			channel_status_info[proc_id] = CHANNEL_ACTIVE;
		} else {
			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
			return -IPCLITE_EINCHAN;
		}
	}
	/* Doorbell only - the debug signal carries no payload */
	ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_DEBUG_SIGNAL].mbox_chan,
							NULL);
	if (ret < IPCLITE_SUCCESS) {
		IPCLITE_OS_LOG(IPCLITE_ERR,
			"Debug Signal sending failed to Core : %d Signal : %d ret : %d\n",
							proc_id, IPCLITE_DEBUG_SIGNAL, ret);
		return -IPCLITE_FAILURE;
	}
	IPCLITE_OS_LOG(IPCLITE_DBG,
		"Debug Signal send completed to core : %d signal : %d ret : %d\n",
							proc_id, IPCLITE_DEBUG_SIGNAL, ret);
	return IPCLITE_SUCCESS;
}
/*
 * ipclite_ssr_update - notify a remote core that another core restarted
 * @proc_id: destination host id (0 .. IPCMEM_NUM_HOSTS-1)
 *
 * Rings the SSR-signal doorbell on @proc_id so it can refresh its channel
 * state. Same status-check and return convention as
 * ipclite_send_debug_info(): IPCLITE_SUCCESS, -EINVAL, -IPCLITE_EINCHAN,
 * or -IPCLITE_FAILURE.
 */
int ipclite_ssr_update(int32_t proc_id)
{
	int ret = 0;

	if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
		IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
		return -EINVAL;
	}
	/* Cached status may be stale; re-check the shared TOC before failing */
	if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
		if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
			channel_status_info[proc_id] = CHANNEL_ACTIVE;
		} else {
			IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
			return -IPCLITE_EINCHAN;
		}
	}
	/* Doorbell only - the SSR signal carries no payload */
	ret = mbox_send_message(ipclite->channel[proc_id].irq_info[IPCLITE_SSR_SIGNAL].mbox_chan,
							NULL);
	if (ret < IPCLITE_SUCCESS) {
		IPCLITE_OS_LOG(IPCLITE_ERR,
			"SSR Signal sending failed to Core : %d Signal : %d ret : %d\n",
							proc_id, IPCLITE_SSR_SIGNAL, ret);
		return -IPCLITE_FAILURE;
	}
	IPCLITE_OS_LOG(IPCLITE_DBG,
		"SSR Signal send completed to core : %d signal : %d ret : %d\n",
							proc_id, IPCLITE_SSR_SIGNAL, ret);
	return IPCLITE_SUCCESS;
}
/*
 * ipclite_recover - recover shared IPCLite state after a remote core crash
 * @core_id: host id of the crashed core
 *
 * Recovery sequence (order matters):
 *  1. If the crashed core held the hardware spinlock, force-release it.
 *  2. Under ssr_mutex, zero the global channel status of every partition
 *     involving the crashed core (prevents races with senders).
 *  3. Reset the crashed core's TX head and RX tail indices.
 *  4. Re-arm the APPS<->crashed-core channel status and refresh the cache.
 *  5. Send an SSR update doorbell to every other configured core.
 *  6. Optionally dump debug structures/logs if IPCLITE_DUMP_SSR is set.
 */
void ipclite_recover(enum ipcmem_host_type core_id)
{
	int ret, i, host, host0, host1;

	IPCLITE_OS_LOG(IPCLITE_DBG, "IPCLite Recover - Crashed Core : %d\n", core_id);
	/* verify and reset the hw mutex lock */
	if (core_id == ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner) {
		ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
		hwspin_unlock_raw(ipclite->hwlock);
		IPCLITE_OS_LOG(IPCLITE_DBG, "HW Lock Reset\n");
	}
	mutex_lock(&ssr_mutex);
	/* Set the Global Channel Status to 0 to avoid Race condition */
	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
		host0 = ipcmem_toc_partition_entries[i].host0;
		host1 = ipcmem_toc_partition_entries[i].host1;
		if (host0 == core_id || host1 == core_id) {
			/* Clear both directions of the affected channel */
			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
				(&(ipclite->ipcmem.toc->toc_entry[host0][host1].status)), 0);
			ipclite_global_atomic_store_i32((ipclite_atomic_int32_t *)
				(&(ipclite->ipcmem.toc->toc_entry[host1][host0].status)), 0);
			channel_status_info[core_id] =
					ipclite->ipcmem.toc->toc_entry[host0][host1].status;
		}
		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host0, host1,
					ipclite->ipcmem.toc->toc_entry[host0][host1].status);
		IPCLITE_OS_LOG(IPCLITE_DBG, "Global Channel Status : [%d][%d] : %d\n", host1, host0,
					ipclite->ipcmem.toc->toc_entry[host1][host0].status);
	}
	/* Resets the TX/RX queue */
	*(ipclite->channel[core_id].tx_fifo->head) = 0;
	*(ipclite->channel[core_id].rx_fifo->tail) = 0;
	IPCLITE_OS_LOG(IPCLITE_DBG, "TX Fifo Reset : %d\n",
				*(ipclite->channel[core_id].tx_fifo->head));
	IPCLITE_OS_LOG(IPCLITE_DBG, "RX Fifo Reset : %d\n",
				*(ipclite->channel[core_id].rx_fifo->tail));
	/* Increment the Global Channel Status for APPS and crashed core*/
	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
			(&(ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status)));
	ipclite_global_atomic_inc((ipclite_atomic_int32_t *)
			(&(ipclite->ipcmem.toc->toc_entry[core_id][IPCMEM_APPS].status)));
	channel_status_info[core_id] =
			ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][core_id].status;
	/* Update other cores about SSR */
	for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
		if (host != core_id && ipclite->ipcmem.toc->recovery.configured_core[host]) {
			ret = ipclite_ssr_update(host);
			if (ret < IPCLITE_SUCCESS)
				IPCLITE_OS_LOG(IPCLITE_ERR,
					"Failed to send SSR update to core : %d\n", host);
			else
				IPCLITE_OS_LOG(IPCLITE_DBG, "SSR update sent to core %d\n", host);
		}
	}
	mutex_unlock(&ssr_mutex);
	/* Dump the debug information */
	if (ipclite_debug_dump & IPCLITE_DUMP_SSR) {
		ipclite_dump_debug_struct();
		ipclite_dump_inmem_logs();
	}
	return;
}
EXPORT_SYMBOL(ipclite_recover);
  634. int ipclite_msg_send(int32_t proc_id, uint64_t data)
  635. {
  636. int ret = 0;
  637. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  638. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  639. return -EINVAL;
  640. }
  641. if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
  642. if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
  643. channel_status_info[proc_id] = CHANNEL_ACTIVE;
  644. } else {
  645. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
  646. return -IPCLITE_EINCHAN;
  647. }
  648. }
  649. ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
  650. IPCLITE_MSG_SIGNAL);
  651. IPCLITE_OS_LOG(IPCLITE_DBG, "Message send complete to core : %d signal : %d ret : %d\n",
  652. proc_id, IPCLITE_MSG_SIGNAL, ret);
  653. return ret;
  654. }
  655. EXPORT_SYMBOL(ipclite_msg_send);
  656. int ipclite_register_client(IPCLite_Client cb_func_ptr, void *priv)
  657. {
  658. if (!cb_func_ptr) {
  659. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
  660. return -EINVAL;
  661. }
  662. synx_client.callback = cb_func_ptr;
  663. synx_client.priv_data = priv;
  664. synx_client.reg_complete = 1;
  665. IPCLITE_OS_LOG(IPCLITE_DBG, "Client Registration completed\n");
  666. return 0;
  667. }
  668. EXPORT_SYMBOL(ipclite_register_client);
  669. int ipclite_test_msg_send(int32_t proc_id, uint64_t data)
  670. {
  671. int ret = 0;
  672. if (proc_id < 0 || proc_id >= IPCMEM_NUM_HOSTS) {
  673. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid proc_id : %d\n", proc_id);
  674. return -EINVAL;
  675. }
  676. if (channel_status_info[proc_id] != CHANNEL_ACTIVE) {
  677. if (ipclite->ipcmem.toc->toc_entry[IPCMEM_APPS][proc_id].status == CHANNEL_ACTIVE) {
  678. channel_status_info[proc_id] = CHANNEL_ACTIVE;
  679. } else {
  680. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot Send, Core %d is Inactive\n", proc_id);
  681. return -IPCLITE_EINCHAN;
  682. }
  683. }
  684. ret = ipclite_tx(&ipclite->channel[proc_id], data, sizeof(data),
  685. IPCLITE_TEST_SIGNAL);
  686. IPCLITE_OS_LOG(IPCLITE_DBG, "Test Msg send complete to core : %d signal : %d ret : %d\n",
  687. proc_id, IPCLITE_TEST_SIGNAL, ret);
  688. return ret;
  689. }
  690. EXPORT_SYMBOL(ipclite_test_msg_send);
  691. int ipclite_register_test_client(IPCLite_Client cb_func_ptr, void *priv)
  692. {
  693. if (!cb_func_ptr) {
  694. IPCLITE_OS_LOG(IPCLITE_ERR, "Invalid callback pointer\n");
  695. return -EINVAL;
  696. }
  697. test_client.callback = cb_func_ptr;
  698. test_client.priv_data = priv;
  699. test_client.reg_complete = 1;
  700. IPCLITE_OS_LOG(IPCLITE_DBG, "Test Client Registration Completed\n");
  701. return 0;
  702. }
  703. EXPORT_SYMBOL(ipclite_register_test_client);
  704. static int map_ipcmem(struct ipclite_info *ipclite, const char *name)
  705. {
  706. struct device *dev;
  707. struct device_node *np;
  708. struct resource r;
  709. int ret = 0;
  710. dev = ipclite->dev;
  711. np = of_parse_phandle(dev->of_node, name, 0);
  712. if (!np) {
  713. IPCLITE_OS_LOG(IPCLITE_ERR, "No %s specified\n", name);
  714. return -EINVAL;
  715. }
  716. ret = of_address_to_resource(np, 0, &r);
  717. of_node_put(np);
  718. if (ret)
  719. return ret;
  720. ipclite->ipcmem.mem.aux_base = (u64)r.start;
  721. ipclite->ipcmem.mem.size = resource_size(&r);
  722. ipclite->ipcmem.mem.virt_base = devm_ioremap_wc(dev, r.start,
  723. resource_size(&r));
  724. if (!ipclite->ipcmem.mem.virt_base)
  725. return -ENOMEM;
  726. IPCLITE_OS_LOG(IPCLITE_DBG, "aux_base = %lx, size=%d,virt_base=%p\n",
  727. ipclite->ipcmem.mem.aux_base, ipclite->ipcmem.mem.size,
  728. ipclite->ipcmem.mem.virt_base);
  729. return ret;
  730. }
/*
 * ipcmem_init() - Lay out the shared IPC memory: TOC, global partition and
 * per-host-pair partitions, then publish init_done for remote cores.
 *
 * Called once from probe after the region is mapped and zeroed. The TOC is
 * written through the (symmetric) toc_entry[host0][host1] matrix; channel
 * status starts ACTIVE only for the APPS loopback pair and
 * ACTIVATE_IN_PROGRESS for every APPS<->remote pair.
 */
static void ipcmem_init(struct ipclite_mem *ipcmem)
{
	int host, host0, host1;
	int i = 0;

	/* TOC lives at the very start of the mapped region */
	ipcmem->toc = ipcmem->mem.virt_base;
	IPCLITE_OS_LOG(IPCLITE_DBG, "toc_base = %p\n", ipcmem->toc);

	ipcmem->toc->hdr.size = IPCMEM_TOC_SIZE;
	IPCLITE_OS_LOG(IPCLITE_DBG, "toc->hdr.size = %d\n", ipcmem->toc->hdr.size);

	/*Fill in global partition details*/
	ipcmem->toc->toc_entry_global = ipcmem_toc_global_partition_entry;
	ipcmem->global_partition = (struct ipcmem_global_partition *)
				((char *)ipcmem->mem.virt_base +
				ipcmem_toc_global_partition_entry.base_offset);
	IPCLITE_OS_LOG(IPCLITE_DBG, "base_offset =%x,ipcmem->global_partition = %p\n",
				ipcmem_toc_global_partition_entry.base_offset,
				ipcmem->global_partition);

	ipcmem->global_partition->hdr = global_partition_hdr;
	IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
				ipcmem->global_partition->hdr.partition_type,
				ipcmem->global_partition->hdr.region_offset,
				ipcmem->global_partition->hdr.region_size);

	/* Fill in each IPCMEM TOC entry from ipcmem_toc_partition_entries config*/
	for (i = 0; i < MAX_PARTITION_COUNT; i++) {
		host0 = ipcmem_toc_partition_entries[i].host0;
		host1 = ipcmem_toc_partition_entries[i].host1;
		IPCLITE_OS_LOG(IPCLITE_DBG, "host0 = %d, host1=%d\n", host0, host1);

		/* Mirror the entry so either host can look up the pair */
		ipcmem->toc->toc_entry[host0][host1] = ipcmem_toc_partition_entries[i];
		ipcmem->toc->toc_entry[host1][host0] = ipcmem_toc_partition_entries[i];

		if (host0 == IPCMEM_APPS && host1 == IPCMEM_APPS) {
			/* Updating the Global Channel Status for APPS Loopback */
			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVE;
			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVE;

			/* Updating Local Channel Status */
			channel_status_info[host1] = ipcmem->toc->toc_entry[host0][host1].status;
		} else if (host0 == IPCMEM_APPS || host1 == IPCMEM_APPS) {
			/* Updating the Global Channel Status */
			ipcmem->toc->toc_entry[host0][host1].status = CHANNEL_ACTIVATE_IN_PROGRESS;
			ipcmem->toc->toc_entry[host1][host0].status = CHANNEL_ACTIVATE_IN_PROGRESS;

			/* Updating Local Channel Status.
			 * host is always assigned here: one of host0/host1 is
			 * IPCMEM_APPS in this branch, so the other is the remote.
			 */
			if (host0 == IPCMEM_APPS)
				host = host1;
			else if (host1 == IPCMEM_APPS)
				host = host0;

			channel_status_info[host] = ipcmem->toc->toc_entry[host0][host1].status;
		}

		ipcmem->partition[i] = (struct ipcmem_partition *)
					((char *)ipcmem->mem.virt_base +
					ipcmem_toc_partition_entries[i].base_offset);
		IPCLITE_OS_LOG(IPCLITE_DBG, "partition[%d] = %p,partition_base_offset[%d]=%lx\n",
					i, ipcmem->partition[i],
					i, ipcmem_toc_partition_entries[i].base_offset);

		/* A host paired with itself is a loopback partition */
		if (host0 == host1)
			ipcmem->partition[i]->hdr = loopback_partition_hdr;
		else
			ipcmem->partition[i]->hdr = default_partition_hdr;

		IPCLITE_OS_LOG(IPCLITE_DBG, "hdr.type = %x,hdr.offset = %x,hdr.size = %d\n",
					ipcmem->partition[i]->hdr.type,
					ipcmem->partition[i]->hdr.desc_offset,
					ipcmem->partition[i]->hdr.desc_size);
	}

	/*Making sure all writes for ipcmem configurations are completed*/
	wmb();

	/* Remote cores poll init_done; it must be published last */
	ipcmem->toc->hdr.init_done = IPCMEM_INIT_COMPLETED;
	IPCLITE_OS_LOG(IPCLITE_DBG, "Ipcmem init completed\n");
}
  796. static int ipclite_channel_irq_init(struct device *parent, struct device_node *node,
  797. struct ipclite_channel *channel)
  798. {
  799. int ret = 0;
  800. u32 index;
  801. struct ipclite_irq_info *irq_info;
  802. struct device *dev;
  803. char strs[MAX_CHANNEL_SIGNALS][IPCLITE_SIGNAL_LABEL_SIZE] = {
  804. "msg", "mem-init", "version", "test", "ssr", "debug"};
  805. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  806. if (!dev)
  807. return -ENOMEM;
  808. dev->parent = parent;
  809. dev->of_node = node;
  810. dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
  811. IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
  812. ret = device_register(dev);
  813. if (ret) {
  814. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite child node\n");
  815. put_device(dev);
  816. return ret;
  817. }
  818. ret = of_property_read_u32(dev->of_node, "index",
  819. &index);
  820. if (ret) {
  821. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to parse index\n");
  822. goto err_dev;
  823. }
  824. irq_info = &channel->irq_info[index];
  825. IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d]=%p\n", index, irq_info);
  826. irq_info->mbox_client.dev = dev;
  827. irq_info->mbox_client.knows_txdone = true;
  828. irq_info->mbox_chan = mbox_request_channel(&irq_info->mbox_client, 0);
  829. IPCLITE_OS_LOG(IPCLITE_DBG, "irq_info[%d].mbox_chan=%p\n", index, irq_info->mbox_chan);
  830. if (IS_ERR(irq_info->mbox_chan)) {
  831. if (PTR_ERR(irq_info->mbox_chan) != -EPROBE_DEFER)
  832. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to acquire IPC channel\n");
  833. goto err_dev;
  834. }
  835. snprintf(irq_info->irqname, 32, "ipclite-signal-%s", strs[index]);
  836. irq_info->irq = of_irq_get(dev->of_node, 0);
  837. IPCLITE_OS_LOG(IPCLITE_DBG, "irq[%d] = %d\n", index, irq_info->irq);
  838. irq_info->signal_id = index;
  839. ret = devm_request_irq(dev, irq_info->irq,
  840. ipclite_intr,
  841. IRQF_NO_SUSPEND | IRQF_SHARED,
  842. irq_info->irqname, irq_info);
  843. if (ret) {
  844. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to request IRQ\n");
  845. goto err_dev;
  846. }
  847. IPCLITE_OS_LOG(IPCLITE_DBG, "Interrupt init completed, ret = %d\n", ret);
  848. return 0;
  849. err_dev:
  850. device_unregister(dev);
  851. kfree(dev);
  852. return ret;
  853. }
  854. int32_t get_global_partition_info(struct global_region_info *global_ipcmem)
  855. {
  856. struct ipcmem_global_partition *global_partition;
  857. /* Check added to verify ipclite is initialized */
  858. if (!ipclite) {
  859. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite not initialized\n");
  860. return -ENOMEM;
  861. }
  862. if (!global_ipcmem)
  863. return -EINVAL;
  864. global_partition = ipclite->ipcmem.global_partition;
  865. global_ipcmem->virt_base = (void *)((char *)global_partition +
  866. global_partition->hdr.region_offset);
  867. global_ipcmem->size = (size_t)(global_partition->hdr.region_size);
  868. IPCLITE_OS_LOG(IPCLITE_DBG, "base = %p, size=%lx\n", global_ipcmem->virt_base,
  869. global_ipcmem->size);
  870. return 0;
  871. }
  872. EXPORT_SYMBOL(get_global_partition_info);
  873. static struct ipcmem_partition_header *get_ipcmem_partition_hdr(struct ipclite_mem ipcmem, int local_pid,
  874. int remote_pid)
  875. {
  876. return (struct ipcmem_partition_header *)((char *)ipcmem.mem.virt_base +
  877. ipcmem.toc->toc_entry[local_pid][remote_pid].base_offset);
  878. }
/* Device-model .release hook for channel devices: invoked when the last
 * reference is dropped; frees the kzalloc'd struct device.
 */
static void ipclite_channel_release(struct device *dev)
{
	IPCLITE_OS_LOG(IPCLITE_INFO, "Releasing ipclite channel\n");
	kfree(dev);
}
  884. /* Sets up following fields of IPCLite channel structure:
  885. * remote_pid,tx_fifo, rx_fifo
  886. */
  887. static int ipclite_channel_init(struct device *parent,
  888. struct device_node *node)
  889. {
  890. struct ipclite_fifo *rx_fifo;
  891. struct ipclite_fifo *tx_fifo;
  892. struct device *dev;
  893. u32 local_pid, remote_pid, global_atomic;
  894. u32 *descs;
  895. int ret = 0;
  896. struct device_node *child;
  897. struct ipcmem_partition_header *partition_hdr;
  898. dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  899. if (!dev)
  900. return -ENOMEM;
  901. dev->parent = parent;
  902. dev->of_node = node;
  903. dev->release = ipclite_channel_release;
  904. dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
  905. IPCLITE_OS_LOG(IPCLITE_DBG, "Registering %s device\n", dev_name(parent->parent));
  906. ret = device_register(dev);
  907. if (ret) {
  908. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to register ipclite device\n");
  909. put_device(dev);
  910. kfree(dev);
  911. return ret;
  912. }
  913. local_pid = LOCAL_HOST;
  914. ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
  915. &remote_pid);
  916. if (ret) {
  917. dev_err(dev, "failed to parse qcom,remote-pid\n");
  918. goto err_put_dev;
  919. }
  920. IPCLITE_OS_LOG(IPCLITE_DBG, "remote_pid = %d, local_pid=%d\n", remote_pid, local_pid);
  921. ipclite_hw_mutex = devm_kzalloc(dev, sizeof(*ipclite_hw_mutex), GFP_KERNEL);
  922. if (!ipclite_hw_mutex) {
  923. ret = -ENOMEM;
  924. goto err_put_dev;
  925. }
  926. ret = of_property_read_u32(dev->of_node, "global_atomic", &global_atomic);
  927. if (ret) {
  928. dev_err(dev, "failed to parse global_atomic\n");
  929. goto err_put_dev;
  930. }
  931. if (global_atomic == 0)
  932. global_atomic_support = GLOBAL_ATOMICS_DISABLED;
  933. rx_fifo = devm_kzalloc(dev, sizeof(*rx_fifo), GFP_KERNEL);
  934. tx_fifo = devm_kzalloc(dev, sizeof(*tx_fifo), GFP_KERNEL);
  935. if (!rx_fifo || !tx_fifo) {
  936. ret = -ENOMEM;
  937. goto err_put_dev;
  938. }
  939. IPCLITE_OS_LOG(IPCLITE_DBG, "rx_fifo = %p, tx_fifo=%p\n", rx_fifo, tx_fifo);
  940. partition_hdr = get_ipcmem_partition_hdr(ipclite->ipcmem,
  941. local_pid, remote_pid);
  942. IPCLITE_OS_LOG(IPCLITE_DBG, "partition_hdr = %p\n", partition_hdr);
  943. descs = (u32 *)((char *)partition_hdr + partition_hdr->desc_offset);
  944. IPCLITE_OS_LOG(IPCLITE_DBG, "descs = %p\n", descs);
  945. if (local_pid < remote_pid) {
  946. tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
  947. tx_fifo->length = partition_hdr->fifo0_size;
  948. rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
  949. rx_fifo->length = partition_hdr->fifo1_size;
  950. tx_fifo->tail = &descs[0];
  951. tx_fifo->head = &descs[1];
  952. rx_fifo->tail = &descs[2];
  953. rx_fifo->head = &descs[3];
  954. } else {
  955. tx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo1_offset;
  956. tx_fifo->length = partition_hdr->fifo1_size;
  957. rx_fifo->fifo = (char *)partition_hdr + partition_hdr->fifo0_offset;
  958. rx_fifo->length = partition_hdr->fifo0_size;
  959. rx_fifo->tail = &descs[0];
  960. rx_fifo->head = &descs[1];
  961. tx_fifo->tail = &descs[2];
  962. tx_fifo->head = &descs[3];
  963. }
  964. if (partition_hdr->type == LOOPBACK_PARTITION_TYPE) {
  965. rx_fifo->tail = tx_fifo->tail;
  966. rx_fifo->head = tx_fifo->head;
  967. }
  968. /* rx_fifo->reset = ipcmem_rx_reset;*/
  969. rx_fifo->avail = ipcmem_rx_avail;
  970. rx_fifo->peak = ipcmem_rx_peak;
  971. rx_fifo->advance = ipcmem_rx_advance;
  972. /* tx_fifo->reset = ipcmem_tx_reset;*/
  973. tx_fifo->avail = ipcmem_tx_avail;
  974. tx_fifo->write = ipcmem_tx_write;
  975. *rx_fifo->tail = 0;
  976. *tx_fifo->head = 0;
  977. /*Store Channel Information*/
  978. ipclite->channel[remote_pid].remote_pid = remote_pid;
  979. ipclite->channel[remote_pid].tx_fifo = tx_fifo;
  980. ipclite->channel[remote_pid].rx_fifo = rx_fifo;
  981. spin_lock_init(&ipclite->channel[remote_pid].tx_lock);
  982. for_each_available_child_of_node(dev->of_node, child) {
  983. ret = ipclite_channel_irq_init(dev, child,
  984. &ipclite->channel[remote_pid]);
  985. if (ret) {
  986. IPCLITE_OS_LOG(IPCLITE_ERR, "irq setup for ipclite channel failed\n");
  987. goto err_put_dev;
  988. }
  989. }
  990. ipclite->ipcmem.toc->recovery.configured_core[remote_pid] = CONFIGURED_CORE;
  991. IPCLITE_OS_LOG(IPCLITE_DBG, "Channel init completed, ret = %d\n", ret);
  992. return ret;
  993. err_put_dev:
  994. ipclite->channel[remote_pid].channel_status = 0;
  995. device_unregister(dev);
  996. kfree(dev);
  997. return ret;
  998. }
  999. static void probe_subsystem(struct device *dev, struct device_node *np)
  1000. {
  1001. int ret = 0;
  1002. ret = ipclite_channel_init(dev, np);
  1003. if (ret)
  1004. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite Channel init failed\n");
  1005. }
  1006. static ssize_t ipclite_dbg_lvl_write(struct kobject *kobj,
  1007. struct kobj_attribute *attr, const char *buf, size_t count)
  1008. {
  1009. int ret = 0, host = 0;
  1010. /* Parse the string from Sysfs Interface */
  1011. ret = kstrtoint(buf, 0, &ipclite_debug_level);
  1012. if (ret < IPCLITE_SUCCESS) {
  1013. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1014. return -IPCLITE_FAILURE;
  1015. }
  1016. /* Check if debug structure is initialized */
  1017. if (!ipclite_dbg_info) {
  1018. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1019. return -ENOMEM;
  1020. }
  1021. /* Update the Global Debug variable for FW cores */
  1022. ipclite_dbg_info->debug_level = ipclite_debug_level;
  1023. /* Memory Barrier to make sure all writes are completed */
  1024. wmb();
  1025. /* Signal other cores for updating the debug information */
  1026. for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
  1027. if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
  1028. ret = ipclite_send_debug_info(host);
  1029. if (ret < IPCLITE_SUCCESS)
  1030. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
  1031. host);
  1032. else
  1033. IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
  1034. }
  1035. }
  1036. return count;
  1037. }
  1038. static ssize_t ipclite_dbg_ctrl_write(struct kobject *kobj,
  1039. struct kobj_attribute *attr, const char *buf, size_t count)
  1040. {
  1041. int ret = 0, host = 0;
  1042. /* Parse the string from Sysfs Interface */
  1043. ret = kstrtoint(buf, 0, &ipclite_debug_control);
  1044. if (ret < IPCLITE_SUCCESS) {
  1045. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1046. return -IPCLITE_FAILURE;
  1047. }
  1048. /* Check if debug structures are initialized */
  1049. if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
  1050. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1051. return -ENOMEM;
  1052. }
  1053. /* Update the Global Debug variable for FW cores */
  1054. ipclite_dbg_info->debug_control = ipclite_debug_control;
  1055. /* Memory Barrier to make sure all writes are completed */
  1056. wmb();
  1057. /* Signal other cores for updating the debug information */
  1058. for (host = 1; host < IPCMEM_NUM_HOSTS; host++) {
  1059. if (ipclite->ipcmem.toc->recovery.configured_core[host]) {
  1060. ret = ipclite_send_debug_info(host);
  1061. if (ret < IPCLITE_SUCCESS)
  1062. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to send the debug info %d\n",
  1063. host);
  1064. else
  1065. IPCLITE_OS_LOG(IPCLITE_DBG, "Debug info sent to host %d\n", host);
  1066. }
  1067. }
  1068. return count;
  1069. }
  1070. static ssize_t ipclite_dbg_dump_write(struct kobject *kobj,
  1071. struct kobj_attribute *attr, const char *buf, size_t count)
  1072. {
  1073. int ret = 0;
  1074. /* Parse the string from Sysfs Interface */
  1075. ret = kstrtoint(buf, 0, &ipclite_debug_dump);
  1076. if (ret < IPCLITE_SUCCESS) {
  1077. IPCLITE_OS_LOG(IPCLITE_ERR, "Error parsing the sysfs value");
  1078. return -IPCLITE_FAILURE;
  1079. }
  1080. /* Check if debug structures are initialized */
  1081. if (!ipclite_dbg_info || !ipclite_dbg_struct || !ipclite_dbg_inmem) {
  1082. IPCLITE_OS_LOG(IPCLITE_ERR, "Debug structures not initialized\n");
  1083. return -ENOMEM;
  1084. }
  1085. /* Dump the debug information */
  1086. if (ipclite_debug_dump & IPCLITE_DUMP_DBG_STRUCT)
  1087. ipclite_dump_debug_struct();
  1088. if (ipclite_debug_dump & IPCLITE_DUMP_INMEM_LOG)
  1089. ipclite_dump_inmem_logs();
  1090. return count;
  1091. }
/* Write-only (0660) sysfs attributes exposed under /sys/kernel/ipclite,
 * backed by the ipclite_dbg_*_write store handlers above.
 */
struct kobj_attribute sysfs_dbg_lvl = __ATTR(ipclite_debug_level, 0660,
					NULL, ipclite_dbg_lvl_write);
struct kobj_attribute sysfs_dbg_ctrl = __ATTR(ipclite_debug_control, 0660,
					NULL, ipclite_dbg_ctrl_write);
struct kobj_attribute sysfs_dbg_dump = __ATTR(ipclite_debug_dump, 0660,
					NULL, ipclite_dbg_dump_write);
  1098. static int ipclite_debug_sysfs_setup(void)
  1099. {
  1100. /* Creating a directory in /sys/kernel/ */
  1101. sysfs_kobj = kobject_create_and_add("ipclite", kernel_kobj);
  1102. if (!sysfs_kobj) {
  1103. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create and add sysfs directory\n");
  1104. return -IPCLITE_FAILURE;
  1105. }
  1106. /* Creating sysfs files/interfaces for debug */
  1107. if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_lvl.attr)) {
  1108. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug level file\n");
  1109. return -IPCLITE_FAILURE;
  1110. }
  1111. if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_ctrl.attr)) {
  1112. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug control file\n");
  1113. return -IPCLITE_FAILURE;
  1114. }
  1115. if (sysfs_create_file(sysfs_kobj, &sysfs_dbg_dump.attr)) {
  1116. IPCLITE_OS_LOG(IPCLITE_ERR, "Cannot create sysfs debug dump file\n");
  1117. return -IPCLITE_FAILURE;
  1118. }
  1119. return IPCLITE_SUCCESS;
  1120. }
/*
 * ipclite_debug_info_setup() - Carve the debug structures out of the tail of
 * the shared IPC memory region.
 *
 * Layout: the last IPCLITE_DEBUG_SIZE bytes of the region hold the debug
 * info header, followed by one ipclite_debug_struct per host, followed by
 * the in-memory log buffer.
 *
 * NOTE(review): the NULL checks below can never fire — each pointer is the
 * result of arithmetic on a non-NULL base; they are kept as written.
 */
static int ipclite_debug_info_setup(void)
{
	/* Setting up the Debug Structures */
	/* Debug info header sits IPCLITE_DEBUG_SIZE bytes before the end of the region */
	ipclite_dbg_info = (struct ipclite_debug_info *)(((char *)ipclite->ipcmem.mem.virt_base +
				ipclite->ipcmem.mem.size) - IPCLITE_DEBUG_SIZE);
	if (!ipclite_dbg_info)
		return -EADDRNOTAVAIL;

	/* APPS' own slot in the per-host debug-struct array — presumably
	 * IPCMEM_APPS indexes this host's entry; TODO confirm against the
	 * layout used by remote firmware.
	 */
	ipclite_dbg_struct = (struct ipclite_debug_struct *)
				(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
				(sizeof(*ipclite_dbg_struct) * IPCMEM_APPS));
	if (!ipclite_dbg_struct)
		return -EADDRNOTAVAIL;

	/* In-memory log buffer starts after all IPCMEM_NUM_HOSTS debug structs */
	ipclite_dbg_inmem = (struct ipclite_debug_inmem_buf *)
				(((char *)ipclite_dbg_info + IPCLITE_DEBUG_INFO_SIZE) +
				(sizeof(*ipclite_dbg_struct) * IPCMEM_NUM_HOSTS));
	if (!ipclite_dbg_inmem)
		return -EADDRNOTAVAIL;

	IPCLITE_OS_LOG(IPCLITE_DBG, "virtual_base_ptr = %p total_size : %d debug_size : %d\n",
			ipclite->ipcmem.mem.virt_base, ipclite->ipcmem.mem.size, IPCLITE_DEBUG_SIZE);
	IPCLITE_OS_LOG(IPCLITE_DBG, "dbg_info : %p dbg_struct : %p dbg_inmem : %p\n",
			ipclite_dbg_info, ipclite_dbg_struct, ipclite_dbg_inmem);

	return IPCLITE_SUCCESS;
}
  1144. static int ipclite_probe(struct platform_device *pdev)
  1145. {
  1146. int ret = 0;
  1147. int hwlock_id;
  1148. struct ipcmem_region *mem;
  1149. struct device_node *cn;
  1150. struct device_node *pn = pdev->dev.of_node;
  1151. struct ipclite_channel broadcast;
  1152. ipclite = kzalloc(sizeof(*ipclite), GFP_KERNEL);
  1153. if (!ipclite) {
  1154. ret = -ENOMEM;
  1155. goto error;
  1156. }
  1157. ipclite->dev = &pdev->dev;
  1158. hwlock_id = of_hwspin_lock_get_id(pn, 0);
  1159. if (hwlock_id < 0) {
  1160. if (hwlock_id != -EPROBE_DEFER)
  1161. dev_err(&pdev->dev, "failed to retrieve hwlock\n");
  1162. ret = hwlock_id;
  1163. goto release;
  1164. }
  1165. IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id retrieved, hwlock_id=%d\n", hwlock_id);
  1166. ipclite->hwlock = hwspin_lock_request_specific(hwlock_id);
  1167. if (!ipclite->hwlock) {
  1168. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to assign hwlock_id\n");
  1169. ret = -ENXIO;
  1170. goto release;
  1171. }
  1172. IPCLITE_OS_LOG(IPCLITE_DBG, "Hwlock id assigned successfully, hwlock=%p\n",
  1173. ipclite->hwlock);
  1174. /* Initializing Local Mutex Lock for SSR functionality */
  1175. mutex_init(&ssr_mutex);
  1176. ret = map_ipcmem(ipclite, "memory-region");
  1177. if (ret) {
  1178. IPCLITE_OS_LOG(IPCLITE_ERR, "failed to map ipcmem\n");
  1179. goto release;
  1180. }
  1181. mem = &(ipclite->ipcmem.mem);
  1182. memset(mem->virt_base, 0, mem->size);
  1183. ipcmem_init(&ipclite->ipcmem);
  1184. /* Set up sysfs for debug */
  1185. ret = ipclite_debug_sysfs_setup();
  1186. if (ret != IPCLITE_SUCCESS) {
  1187. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Sysfs\n");
  1188. goto release;
  1189. }
  1190. /* Mapping Debug Memory */
  1191. ret = ipclite_debug_info_setup();
  1192. if (ret != IPCLITE_SUCCESS) {
  1193. IPCLITE_OS_LOG(IPCLITE_ERR, "Failed to Set up IPCLite Debug Structures\n");
  1194. goto release;
  1195. }
  1196. /* Setup Channel for each Remote Subsystem */
  1197. for_each_available_child_of_node(pn, cn)
  1198. probe_subsystem(&pdev->dev, cn);
  1199. /* Broadcast init_done signal to all subsystems once mbox channels
  1200. * are set up
  1201. */
  1202. broadcast = ipclite->channel[IPCMEM_APPS];
  1203. ret = mbox_send_message(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan,
  1204. NULL);
  1205. if (ret < 0)
  1206. goto mem_release;
  1207. mbox_client_txdone(broadcast.irq_info[IPCLITE_MEM_INIT_SIGNAL].mbox_chan, 0);
  1208. if (global_atomic_support) {
  1209. ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
  1210. GLOBAL_ATOMICS_ENABLED;
  1211. } else {
  1212. ipclite->ipcmem.toc->ipclite_features.global_atomic_support =
  1213. GLOBAL_ATOMICS_DISABLED;
  1214. }
  1215. IPCLITE_OS_LOG(IPCLITE_DBG, "global_atomic_support : %d\n",
  1216. ipclite->ipcmem.toc->ipclite_features.global_atomic_support);
  1217. /* hw mutex callbacks */
  1218. ipclite_hw_mutex->acquire = ipclite_hw_mutex_acquire;
  1219. ipclite_hw_mutex->release = ipclite_hw_mutex_release;
  1220. /* store to ipclite structure */
  1221. ipclite->ipclite_hw_mutex = ipclite_hw_mutex;
  1222. /* initialize hwlock owner to invalid host */
  1223. ipclite->ipcmem.toc->recovery.global_atomic_hwlock_owner = IPCMEM_INVALID_HOST;
  1224. /* Update the Global Debug variable for FW cores */
  1225. ipclite_dbg_info->debug_level = ipclite_debug_level;
  1226. ipclite_dbg_info->debug_control = ipclite_debug_control;
  1227. IPCLITE_OS_LOG(IPCLITE_INFO, "IPCLite probe completed successfully\n");
  1228. return ret;
  1229. mem_release:
  1230. /* If the remote subsystem has already completed the init and actively
  1231. * using IPCMEM, re-assigning IPCMEM memory back to HLOS can lead to crash
  1232. * Solution: Either we don't take back the memory or make sure APPS completes
  1233. * init before any other subsystem initializes IPCLite (we won't have to send
  1234. * braodcast)
  1235. */
  1236. release:
  1237. kfree(ipclite);
  1238. ipclite = NULL;
  1239. error:
  1240. IPCLITE_OS_LOG(IPCLITE_ERR, "IPCLite probe failed\n");
  1241. return ret;
  1242. }
/* Device-tree match table: binds this driver to "qcom,ipclite" nodes */
static const struct of_device_id ipclite_of_match[] = {
	{ .compatible = "qcom,ipclite"},
	{}
};
MODULE_DEVICE_TABLE(of, ipclite_of_match);

/* Platform driver registration; no .remove — the driver stays loaded */
static struct platform_driver ipclite_driver = {
	.probe = ipclite_probe,
	.driver = {
		.name = "ipclite",
		.of_match_table = ipclite_of_match,
	},
};
module_platform_driver(ipclite_driver);

MODULE_DESCRIPTION("IPCLite Driver");
MODULE_LICENSE("GPL v2");
/* Ensure the hwspinlock provider loads first (probe needs the hwlock) */
MODULE_SOFTDEP("pre: qcom_hwspinlock");