  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. *
  6. */
  7. #include <linux/slab.h>
  8. #include <linux/wait.h>
  9. #include <linux/module.h>
  10. #include <linux/of.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/ratelimit.h>
  14. #include <linux/gunyah/gh_msgq.h>
  15. #include <linux/gunyah_rsc_mgr.h>
  16. #include "hcall_msgq.h"
  17. /* HVC call specific mask: 0 to 31 */
  18. #define GH_MSGQ_HVC_FLAGS_MASK GENMASK_ULL(31, 0)
struct gh_msgq_cap_table;

/*
 * Per-client handle returned by gh_msgq_register() and passed back to
 * gh_msgq_send()/gh_msgq_recv()/gh_msgq_unregister().
 */
struct gh_msgq_desc {
	int label;				/* msgq label the client registered for */
	struct gh_msgq_cap_table *cap_table;	/* backing capability-table entry */
};

/*
 * One entry per message-queue label, linked on gh_msgq_cap_list.
 * Holds the Tx/Rx capability IDs and IRQs filled in by
 * gh_msgq_populate_cap_info(), plus the flag/waitqueue state driven by
 * the Tx/Rx ISRs and the send/recv paths.
 */
struct gh_msgq_cap_table {
	struct gh_msgq_desc *client_desc;	/* at most one registered client, else NULL */
	spinlock_t cap_entry_lock;		/* protects client_desc and cap-ID/IRQ fields */
	gh_capid_t tx_cap_id;			/* GH_CAPID_INVAL until populated */
	gh_capid_t rx_cap_id;			/* GH_CAPID_INVAL until populated */
	int tx_irq;				/* 0 until populated */
	int rx_irq;				/* 0 until populated */
	const char *tx_irq_name;		/* kasprintf'd; freed with the entry */
	const char *rx_irq_name;		/* kasprintf'd; freed with the entry */
	spinlock_t tx_lock;			/* protects tx_full */
	spinlock_t rx_lock;			/* protects rx_empty */
	bool tx_full;				/* set by __gh_msgq_send(), cleared by Tx ISR */
	bool rx_empty;				/* set by __gh_msgq_recv(), cleared by Rx ISR */
	wait_queue_head_t tx_wq;		/* woken on Tx ISR and Tx cap population */
	wait_queue_head_t rx_wq;		/* woken on Rx ISR and Rx cap population */
	int label;				/* lookup key on gh_msgq_cap_list */
	struct list_head entry;			/* link on gh_msgq_cap_list */
};

static LIST_HEAD(gh_msgq_cap_list);
/* Serializes lookup, insertion and removal on gh_msgq_cap_list. */
static DEFINE_SPINLOCK(gh_msgq_cap_list_lock);
  44. struct gh_msgq_cap_table *gh_msgq_alloc_entry(int label)
  45. {
  46. int ret;
  47. struct gh_msgq_cap_table *cap_table_entry = NULL;
  48. cap_table_entry = kzalloc(sizeof(struct gh_msgq_cap_table), GFP_ATOMIC);
  49. if (!cap_table_entry)
  50. return ERR_PTR(-ENOMEM);
  51. cap_table_entry->tx_cap_id = GH_CAPID_INVAL;
  52. cap_table_entry->rx_cap_id = GH_CAPID_INVAL;
  53. cap_table_entry->tx_full = false;
  54. cap_table_entry->rx_empty = true;
  55. cap_table_entry->label = label;
  56. init_waitqueue_head(&cap_table_entry->tx_wq);
  57. init_waitqueue_head(&cap_table_entry->rx_wq);
  58. spin_lock_init(&cap_table_entry->tx_lock);
  59. spin_lock_init(&cap_table_entry->rx_lock);
  60. spin_lock_init(&cap_table_entry->cap_entry_lock);
  61. cap_table_entry->tx_irq_name =
  62. kasprintf(GFP_ATOMIC, "gh_msgq_tx_%d", label);
  63. if (!cap_table_entry->tx_irq_name) {
  64. ret = -ENOMEM;
  65. goto err;
  66. }
  67. cap_table_entry->rx_irq_name =
  68. kasprintf(GFP_ATOMIC, "gh_msgq_rx_%d", label);
  69. if (!cap_table_entry->rx_irq_name) {
  70. ret = -ENOMEM;
  71. goto err;
  72. }
  73. list_add(&cap_table_entry->entry, &gh_msgq_cap_list);
  74. return cap_table_entry;
  75. err:
  76. kfree(cap_table_entry->tx_irq_name);
  77. kfree(cap_table_entry->rx_irq_name);
  78. kfree(cap_table_entry);
  79. return ERR_PTR(ret);
  80. }
  81. static irqreturn_t gh_msgq_rx_isr(int irq, void *dev)
  82. {
  83. struct gh_msgq_cap_table *cap_table_entry = dev;
  84. spin_lock(&cap_table_entry->rx_lock);
  85. cap_table_entry->rx_empty = false;
  86. spin_unlock(&cap_table_entry->rx_lock);
  87. wake_up_interruptible(&cap_table_entry->rx_wq);
  88. return IRQ_HANDLED;
  89. }
  90. static irqreturn_t gh_msgq_tx_isr(int irq, void *dev)
  91. {
  92. struct gh_msgq_cap_table *cap_table_entry = dev;
  93. spin_lock(&cap_table_entry->tx_lock);
  94. cap_table_entry->tx_full = false;
  95. spin_unlock(&cap_table_entry->tx_lock);
  96. wake_up_interruptible(&cap_table_entry->tx_wq);
  97. return IRQ_HANDLED;
  98. }
  99. static int __gh_msgq_recv(struct gh_msgq_cap_table *cap_table_entry,
  100. void *buff, size_t buff_size,
  101. size_t *recv_size, u64 rx_flags)
  102. {
  103. struct gh_hcall_msgq_recv_resp resp = {};
  104. unsigned long flags;
  105. int gh_ret;
  106. int ret = 0;
  107. /* Discard the driver specific flags, and keep only HVC specifics */
  108. rx_flags &= GH_MSGQ_HVC_FLAGS_MASK;
  109. spin_lock_irqsave(&cap_table_entry->rx_lock, flags);
  110. gh_ret = gh_hcall_msgq_recv(cap_table_entry->rx_cap_id, buff,
  111. buff_size, &resp);
  112. switch (gh_ret) {
  113. case GH_ERROR_OK:
  114. *recv_size = resp.recv_size;
  115. cap_table_entry->rx_empty = !resp.not_empty;
  116. ret = 0;
  117. break;
  118. case GH_ERROR_MSGQUEUE_EMPTY:
  119. cap_table_entry->rx_empty = true;
  120. ret = -EAGAIN;
  121. break;
  122. default:
  123. ret = gh_error_remap(gh_ret);
  124. }
  125. spin_unlock_irqrestore(&cap_table_entry->rx_lock, flags);
  126. if (ret != 0 && ret != -EAGAIN)
  127. pr_err("%s: Failed to recv from msgq. Hypercall error: %d\n",
  128. __func__, gh_ret);
  129. return ret;
  130. }
/**
 * gh_msgq_recv: Receive a message from the client running on a different VM
 * @client_desc: The client descriptor that was obtained via gh_msgq_register()
 * @buff: Pointer to the buffer where the received data must be placed
 * @buff_size: The size of the buffer space available
 * @recv_size: The actual amount of data that is copied into buff
 * @flags: Optional flags to pass to receive the data. For the list of flags,
 *         see linux/gunyah/gh_msgq.h
 *
 * The function returns 0 if the data is successfully received and recv_size
 * would contain the actual amount of data copied into buff.
 * It returns -EINVAL if the caller passes invalid arguments, -EAGAIN
 * if the message queue is not yet ready to communicate, and -EPERM if the
 * caller doesn't have permissions to receive the data. In all these failure
 * cases, recv_size is unmodified.
 *
 * Note: this function may sleep and should not be called from interrupt
 * context
 */
int gh_msgq_recv(void *msgq_client_desc,
			void *buff, size_t buff_size,
			size_t *recv_size, unsigned long flags)
{
	struct gh_msgq_desc *client_desc = msgq_client_desc;
	struct gh_msgq_cap_table *cap_table_entry;
	int ret;

	if (!client_desc || !buff || !buff_size || !recv_size)
		return -EINVAL;

	if (buff_size > GH_MSGQ_MAX_MSG_SIZE_BYTES)
		return -E2BIG;

	if (client_desc->cap_table == NULL)
		return -EAGAIN;

	cap_table_entry = client_desc->cap_table;

	spin_lock(&cap_table_entry->cap_entry_lock);

	/* Reject descriptors that don't match the entry's registered client. */
	if (cap_table_entry->client_desc != client_desc) {
		pr_err("%s: Invalid client descriptor\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/*
	 * In non-blocking mode, bail out early if the Rx capability has not
	 * been populated yet (gh_msgq_populate_cap_info() not called).
	 */
	if ((cap_table_entry->rx_cap_id == GH_CAPID_INVAL) &&
			(flags & GH_MSGQ_NONBLOCK)) {
		pr_err_ratelimited(
			"%s: Recv info for label %d not yet initialized\n",
			__func__, client_desc->label);
		ret = -EAGAIN;
		goto err;
	}
	spin_unlock(&cap_table_entry->cap_entry_lock);

	/*
	 * Blocking mode: sleep until gh_msgq_populate_cap_info() installs a
	 * valid Rx capability (it wakes rx_wq). NOTE(review): rx_cap_id is
	 * read here without cap_entry_lock; the waitqueue wakeup provides the
	 * ordering — confirm no torn read is possible on the target arch.
	 */
	if (wait_event_interruptible(cap_table_entry->rx_wq,
			cap_table_entry->rx_cap_id != GH_CAPID_INVAL))
		return -ERESTARTSYS;

	spin_lock(&cap_table_entry->cap_entry_lock);
	if (!cap_table_entry->rx_irq) {
		pr_err_ratelimited("%s: Rx IRQ for label %d not yet setup\n",
				__func__, client_desc->label);
		ret = -EAGAIN;
		goto err;
	}
	spin_unlock(&cap_table_entry->cap_entry_lock);

	/*
	 * Retry loop: __gh_msgq_recv() returns -EAGAIN when the hypervisor
	 * reports the queue empty; wait for the Rx ISR to clear rx_empty and
	 * try again (or bail out immediately in non-blocking mode).
	 */
	do {
		if (cap_table_entry->rx_empty && (flags & GH_MSGQ_NONBLOCK))
			return -EAGAIN;

		if (wait_event_interruptible(cap_table_entry->rx_wq,
				!cap_table_entry->rx_empty))
			return -ERESTARTSYS;

		ret = __gh_msgq_recv(cap_table_entry, buff, buff_size,
					recv_size, flags);
	} while (ret == -EAGAIN);

	if (!ret)
		print_hex_dump_debug(__func__, DUMP_PREFIX_OFFSET,
				4, 1, buff, *recv_size, false);

	return ret;

err:
	spin_unlock(&cap_table_entry->cap_entry_lock);
	return ret;
}
EXPORT_SYMBOL(gh_msgq_recv);
  208. static int __gh_msgq_send(struct gh_msgq_cap_table *cap_table_entry,
  209. void *buff, size_t size, u64 tx_flags)
  210. {
  211. struct gh_hcall_msgq_send_resp resp = {};
  212. unsigned long flags;
  213. int gh_ret;
  214. int ret = 0;
  215. /* Discard the driver specific flags, and keep only HVC specifics */
  216. tx_flags &= GH_MSGQ_HVC_FLAGS_MASK;
  217. print_hex_dump_debug("gh_msgq_send: ", DUMP_PREFIX_OFFSET,
  218. 4, 1, buff, size, false);
  219. spin_lock_irqsave(&cap_table_entry->tx_lock, flags);
  220. gh_ret = gh_hcall_msgq_send(cap_table_entry->tx_cap_id,
  221. size, buff, tx_flags, &resp);
  222. switch (gh_ret) {
  223. case GH_ERROR_OK:
  224. cap_table_entry->tx_full = !resp.not_full;
  225. ret = 0;
  226. break;
  227. case GH_ERROR_MSGQUEUE_FULL:
  228. cap_table_entry->tx_full = true;
  229. ret = -EAGAIN;
  230. break;
  231. default:
  232. ret = gh_error_remap(gh_ret);
  233. }
  234. spin_unlock_irqrestore(&cap_table_entry->tx_lock, flags);
  235. if (ret != 0 && ret != -EAGAIN)
  236. pr_err("%s: Failed to send on msgq. Hypercall error: %d\n",
  237. __func__, gh_ret);
  238. return ret;
  239. }
/**
 * gh_msgq_send: Send a message to the client on a different VM
 * @client_desc: The client descriptor that was obtained via gh_msgq_register()
 * @buff: Pointer to the buffer that needs to be sent
 * @size: The size of the buffer
 * @flags: Optional flags to pass to send the data. For the list of flags,
 *         see linux/gunyah/gh_msgq.h
 *
 * The function returns -EINVAL if the caller passes invalid arguments,
 * -EAGAIN if the message queue is not yet ready to communicate, and -EPERM if
 * the caller doesn't have permissions to send the data.
 *
 * Note: in blocking mode this function may sleep; do not call from
 * interrupt context.
 */
int gh_msgq_send(void *msgq_client_desc,
			void *buff, size_t size, unsigned long flags)
{
	struct gh_msgq_desc *client_desc = msgq_client_desc;
	struct gh_msgq_cap_table *cap_table_entry;
	int ret;

	if (!client_desc || !buff || !size)
		return -EINVAL;

	if (size > GH_MSGQ_MAX_MSG_SIZE_BYTES)
		return -E2BIG;

	if (client_desc->cap_table == NULL)
		return -EAGAIN;

	cap_table_entry = client_desc->cap_table;

	spin_lock(&cap_table_entry->cap_entry_lock);

	/* Reject descriptors that don't match the entry's registered client. */
	if (cap_table_entry->client_desc != client_desc) {
		pr_err("%s: Invalid client descriptor\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/*
	 * In non-blocking mode, bail out early if the Tx capability has not
	 * been populated yet (gh_msgq_populate_cap_info() not called).
	 */
	if ((cap_table_entry->tx_cap_id == GH_CAPID_INVAL) &&
			(flags & GH_MSGQ_NONBLOCK)) {
		pr_err_ratelimited(
			"%s: Send info for label %d not yet initialized\n",
			__func__, client_desc->label);
		ret = -EAGAIN;
		goto err;
	}
	spin_unlock(&cap_table_entry->cap_entry_lock);

	/*
	 * Blocking mode: sleep until gh_msgq_populate_cap_info() installs a
	 * valid Tx capability (it wakes tx_wq).
	 */
	if (wait_event_interruptible(cap_table_entry->tx_wq,
			cap_table_entry->tx_cap_id != GH_CAPID_INVAL))
		return -ERESTARTSYS;

	spin_lock(&cap_table_entry->cap_entry_lock);
	if (!cap_table_entry->tx_irq) {
		pr_err_ratelimited("%s: Tx IRQ for label %d not yet setup\n",
				__func__, client_desc->label);
		ret = -EAGAIN;
		goto err;
	}
	spin_unlock(&cap_table_entry->cap_entry_lock);

	/*
	 * Retry loop: __gh_msgq_send() returns -EAGAIN when the hypervisor
	 * reports the queue full; wait for the Tx ISR to clear tx_full and
	 * try again (or bail out immediately in non-blocking mode).
	 */
	do {
		if (cap_table_entry->tx_full && (flags & GH_MSGQ_NONBLOCK))
			return -EAGAIN;

		if (wait_event_interruptible(cap_table_entry->tx_wq,
				!cap_table_entry->tx_full))
			return -ERESTARTSYS;

		ret = __gh_msgq_send(cap_table_entry, buff, size, flags);
	} while (ret == -EAGAIN);

	return ret;

err:
	spin_unlock(&cap_table_entry->cap_entry_lock);
	return ret;
}
EXPORT_SYMBOL(gh_msgq_send);
/**
 * gh_msgq_register: Register as a client to use the message queue
 * @label: The label associated to the message queue that the client wants
 *         to communicate
 *
 * The function returns a descriptor for the clients to send and receive the
 * messages. Else, returns -EBUSY if some other client is already registered
 * to this label, and -EINVAL for invalid arguments. The caller should check
 * the return value using IS_ERR_OR_NULL() and PTR_ERR() to extract the error
 * code.
 */
void *gh_msgq_register(int label)
{
	struct gh_msgq_cap_table *cap_table_entry = NULL, *tmp_entry;
	struct gh_msgq_desc *client_desc;

	if (label < 0)
		return ERR_PTR(-EINVAL);

	spin_lock(&gh_msgq_cap_list_lock);
	/* Reuse an existing entry for this label if one was created earlier. */
	list_for_each_entry(tmp_entry, &gh_msgq_cap_list, entry) {
		if (label == tmp_entry->label) {
			cap_table_entry = tmp_entry;
			break;
		}
	}
	if (cap_table_entry == NULL) {
		/* Allocates under the list lock; uses GFP_ATOMIC internally. */
		cap_table_entry = gh_msgq_alloc_entry(label);
		if (IS_ERR(cap_table_entry)) {
			spin_unlock(&gh_msgq_cap_list_lock);
			return cap_table_entry;
		}
	}
	spin_unlock(&gh_msgq_cap_list_lock);

	spin_lock(&cap_table_entry->cap_entry_lock);

	/* Multiple clients cannot register to the same label (msgq) */
	if (cap_table_entry->client_desc) {
		spin_unlock(&cap_table_entry->cap_entry_lock);
		pr_err("%s: Client already exists for label %d\n",
				__func__, label);
		return ERR_PTR(-EBUSY);
	}

	/* GFP_ATOMIC: allocated while holding cap_entry_lock. */
	client_desc = kzalloc(sizeof(*client_desc), GFP_ATOMIC);
	if (!client_desc) {
		spin_unlock(&cap_table_entry->cap_entry_lock);
		return ERR_PTR(-ENOMEM);
	}

	client_desc->label = label;
	client_desc->cap_table = cap_table_entry;
	cap_table_entry->client_desc = client_desc;

	spin_unlock(&cap_table_entry->cap_entry_lock);

	pr_info("gh_msgq: Registered client for label: %d\n", label);

	return client_desc;
}
EXPORT_SYMBOL(gh_msgq_register);
  359. /**
  360. * gh_msgq_unregister: Unregister as a client to the use the message queue
  361. * @client_desc: The descriptor that was passed via gh_msgq_register()
  362. *
  363. * The function returns 0 is the client was unregistered successfully. Else,
  364. * -EINVAL for invalid arguments.
  365. */
  366. int gh_msgq_unregister(void *msgq_client_desc)
  367. {
  368. struct gh_msgq_desc *client_desc = msgq_client_desc;
  369. struct gh_msgq_cap_table *cap_table_entry;
  370. if (!client_desc)
  371. return -EINVAL;
  372. cap_table_entry = client_desc->cap_table;
  373. spin_lock(&cap_table_entry->cap_entry_lock);
  374. /* Is the client trying to free someone else's msgq? */
  375. if (cap_table_entry->client_desc != client_desc) {
  376. pr_err("%s: Trying to free invalid client descriptor!\n",
  377. __func__);
  378. spin_unlock(&cap_table_entry->cap_entry_lock);
  379. return -EINVAL;
  380. }
  381. cap_table_entry->client_desc = NULL;
  382. spin_unlock(&cap_table_entry->cap_entry_lock);
  383. pr_info("%s: Unregistered client for label: %d\n",
  384. __func__, client_desc->label);
  385. kfree(client_desc);
  386. return 0;
  387. }
  388. EXPORT_SYMBOL(gh_msgq_unregister);
  389. int gh_msgq_populate_cap_info(int label, u64 cap_id, int direction, int irq)
  390. {
  391. struct gh_msgq_cap_table *cap_table_entry = NULL, *tmp_entry;
  392. int ret;
  393. if (label < 0) {
  394. pr_err("%s: Invalid label passed\n", __func__);
  395. return -EINVAL;
  396. }
  397. if (irq < 0) {
  398. pr_err("%s: Invalid IRQ number passed\n", __func__);
  399. return -ENXIO;
  400. }
  401. spin_lock(&gh_msgq_cap_list_lock);
  402. list_for_each_entry(tmp_entry, &gh_msgq_cap_list, entry) {
  403. if (label == tmp_entry->label) {
  404. cap_table_entry = tmp_entry;
  405. break;
  406. }
  407. }
  408. if (cap_table_entry == NULL) {
  409. cap_table_entry = gh_msgq_alloc_entry(label);
  410. if (IS_ERR(cap_table_entry)) {
  411. spin_unlock(&gh_msgq_cap_list_lock);
  412. return PTR_ERR(cap_table_entry);
  413. }
  414. }
  415. spin_unlock(&gh_msgq_cap_list_lock);
  416. if (direction == GH_MSGQ_DIRECTION_TX) {
  417. ret = request_irq(irq, gh_msgq_tx_isr, 0,
  418. cap_table_entry->tx_irq_name, cap_table_entry);
  419. if (ret < 0)
  420. goto err;
  421. spin_lock(&cap_table_entry->cap_entry_lock);
  422. cap_table_entry->tx_cap_id = cap_id;
  423. cap_table_entry->tx_irq = irq;
  424. spin_unlock(&cap_table_entry->cap_entry_lock);
  425. wake_up_interruptible(&cap_table_entry->tx_wq);
  426. } else if (direction == GH_MSGQ_DIRECTION_RX) {
  427. ret = request_irq(irq, gh_msgq_rx_isr, 0,
  428. cap_table_entry->rx_irq_name, cap_table_entry);
  429. if (ret < 0)
  430. goto err;
  431. spin_lock(&cap_table_entry->cap_entry_lock);
  432. cap_table_entry->rx_cap_id = cap_id;
  433. cap_table_entry->rx_irq = irq;
  434. spin_unlock(&cap_table_entry->cap_entry_lock);
  435. wake_up_interruptible(&cap_table_entry->rx_wq);
  436. } else {
  437. pr_err("%s: Invalid direction passed\n", __func__);
  438. ret = -EINVAL;
  439. goto err;
  440. }
  441. irq_set_irq_wake(irq, 1);
  442. pr_debug(
  443. "%s: label: %d; cap_id: %llu; dir: %d; irq: %d\n",
  444. __func__, label, cap_id, direction, irq);
  445. return 0;
  446. err:
  447. spin_lock(&gh_msgq_cap_list_lock);
  448. list_del(&cap_table_entry->entry);
  449. spin_unlock(&gh_msgq_cap_list_lock);
  450. kfree(cap_table_entry->tx_irq_name);
  451. kfree(cap_table_entry->rx_irq_name);
  452. kfree(cap_table_entry);
  453. return ret;
  454. }
  455. EXPORT_SYMBOL(gh_msgq_populate_cap_info);
  456. /**
  457. * gh_msgq_reset_cap_info: Reset the msgq cap info
  458. * @label: The label associated to the message queue that the client wants
  459. * to communicate
  460. * @direction: The direction of msgq
  461. * @irq: The irq associated with the msgq
  462. *
  463. * The function resets all the msgq related info.
  464. */
  465. int gh_msgq_reset_cap_info(enum gh_msgq_label label, int direction, int *irq)
  466. {
  467. struct gh_msgq_cap_table *cap_table_entry = NULL, *tmp_entry;
  468. int ret;
  469. if (label < 0) {
  470. pr_err("%s: Invalid label passed\n", __func__);
  471. return -EINVAL;
  472. }
  473. if (!irq)
  474. return -EINVAL;
  475. spin_lock(&gh_msgq_cap_list_lock);
  476. list_for_each_entry(tmp_entry, &gh_msgq_cap_list, entry) {
  477. if (label == tmp_entry->label) {
  478. cap_table_entry = tmp_entry;
  479. break;
  480. }
  481. }
  482. spin_unlock(&gh_msgq_cap_list_lock);
  483. if (cap_table_entry == NULL)
  484. return -EINVAL;
  485. if (direction == GH_MSGQ_DIRECTION_TX) {
  486. if (!cap_table_entry->tx_irq) {
  487. pr_err("%s: Tx IRQ not setup\n", __func__);
  488. ret = -ENXIO;
  489. goto err_unlock;
  490. }
  491. *irq = cap_table_entry->tx_irq;
  492. spin_lock(&cap_table_entry->cap_entry_lock);
  493. cap_table_entry->tx_cap_id = GH_CAPID_INVAL;
  494. cap_table_entry->tx_irq = 0;
  495. spin_unlock(&cap_table_entry->cap_entry_lock);
  496. } else if (direction == GH_MSGQ_DIRECTION_RX) {
  497. if (!cap_table_entry->rx_irq) {
  498. pr_err("%s: Rx IRQ not setup\n", __func__);
  499. ret = -ENXIO;
  500. goto err_unlock;
  501. }
  502. *irq = cap_table_entry->rx_irq;
  503. spin_lock(&cap_table_entry->cap_entry_lock);
  504. cap_table_entry->rx_cap_id = GH_CAPID_INVAL;
  505. cap_table_entry->rx_irq = 0;
  506. spin_unlock(&cap_table_entry->cap_entry_lock);
  507. } else {
  508. pr_err("%s: Invalid direction passed\n", __func__);
  509. ret = -EINVAL;
  510. goto err_unlock;
  511. }
  512. if (*irq)
  513. free_irq(*irq, cap_table_entry);
  514. return 0;
  515. err_unlock:
  516. return ret;
  517. }
  518. EXPORT_SYMBOL(gh_msgq_reset_cap_info);
  519. static void gh_msgq_cleanup(void)
  520. {
  521. struct gh_msgq_cap_table *cap_table_entry;
  522. struct gh_msgq_cap_table *temp;
  523. spin_lock(&gh_msgq_cap_list_lock);
  524. list_for_each_entry_safe(cap_table_entry, temp, &gh_msgq_cap_list, entry) {
  525. kfree(cap_table_entry->tx_irq_name);
  526. kfree(cap_table_entry->rx_irq_name);
  527. kfree(cap_table_entry);
  528. }
  529. spin_unlock(&gh_msgq_cap_list_lock);
  530. }
/* Nothing to set up at load time; cap table entries are created lazily. */
static int __init ghd_msgq_init(void)
{
	return 0;
}
module_init(ghd_msgq_init);

/* Release all lazily-allocated cap table entries on unload. */
static void __exit ghd_msgq_exit(void)
{
	gh_msgq_cleanup();
}
module_exit(ghd_msgq_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Message Queue Driver");