mem-buf-msgq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include "mem-buf-msgq.h"
#include "trace-mem-buf.h"

#define MEM_BUF_TIMEOUT_MS 3500

/*
 * Data structures for tracking request/reply transactions, as well as message
 * queue usage.
 */
static DEFINE_MUTEX(mem_buf_msgq_list_lock);
static LIST_HEAD(mem_buf_msgq_list);

struct mem_buf_msgq_id {
	const char *name;
	int label;
};

static struct mem_buf_msgq_id mem_buf_msgqs[] = {
	{
		.name = "trusted_vm",
		.label = GH_MSGQ_LABEL_MEMBUF,
	},
	{
	},
};

/**
 * struct mem_buf_txn: Represents a transaction (request/response pair) in the
 * mem-buf-msgq driver.
 * @txn_id: Transaction ID used to match requests and responses (i.e. a new ID
 * is allocated per request, and the response will have a matching ID).
 * @txn_ret: The return value of the transaction.
 * @txn_done: Signals that a response has arrived.
 * @resp_buf: A pointer to the buffer into which the response should be decoded.
 */
struct mem_buf_txn {
	int txn_id;
	int txn_ret;
	struct completion txn_done;
	void *resp_buf;
};
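
/**
 * struct mem_buf_msgq_desc: Per-message-queue state for the mem-buf-msgq driver.
 * @msgq_ops: Handler callbacks supplied by the client at registration time.
 * @hdlr_data: Opaque pointer passed back to the @msgq_ops callbacks.
 * @idr_mutex: Protects @txn_idr; completions are also signalled under it.
 * @txn_idr: Maps transaction IDs to in-flight struct mem_buf_txn objects.
 * @msgq_hdl: Handle of the underlying Gunyah message queue.
 * @recv_thr: Kthread that receives and dispatches incoming messages.
 * @list: Node on the global mem_buf_msgq_list.
 */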
struct mem_buf_msgq_desc {
	const struct mem_buf_msgq_ops *msgq_ops;
	void *hdlr_data;
	struct mutex idr_mutex;
	struct idr txn_idr;
	void *msgq_hdl;
	struct task_struct *recv_thr;
	struct list_head list;
};
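
/*
 * Helpers for the memory-type specific ("arb") payload that is appended to an
 * allocation request after its ACL entries. For DMA-heap backed allocations
 * this payload is the name of the DMA-heap to allocate from; buddy
 * allocations carry no extra payload.
 */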
static size_t mem_buf_get_mem_type_alloc_req_size(enum mem_buf_mem_type type)
{
	if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
		return MEM_BUF_MAX_DMAHEAP_NAME_LEN;

	/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
	return 0;
}

static void mem_buf_populate_alloc_req_arb_payload(void *dst, void *src,
						   enum mem_buf_mem_type type)
{
	if (type == MEM_BUF_DMAHEAP_MEM_TYPE)
		strscpy(dst, src, MEM_BUF_MAX_DMAHEAP_NAME_LEN);
	/* Do nothing for MEM_BUF_BUDDY_MEM_TYPE */
}

/*
 * mem_buf_construct_alloc_req: Constructs an allocation request message.
 * @mem_buf_txn: A valid transaction structure allocated by a call to mem_buf_init_txn().
 * @alloc_size: The size of the allocation to be requested.
 * @acl_desc: A GH ACL descriptor that describes who will have access to the memory allocated and
 * with what permissions.
 * @src_mem_type: The type of memory that will be used to satisfy the allocation.
 * @src_data: A pointer to auxiliary data required to satisfy the allocation.
 * @trans_type: One of GH_RM_TRANS_TYPE_DONATE/LEND/SHARE.
 */
void *mem_buf_construct_alloc_req(void *mem_buf_txn, size_t alloc_size,
				  struct gh_acl_desc *acl_desc,
				  enum mem_buf_mem_type src_mem_type, void *src_data,
				  u32 trans_type)
{
	size_t tot_size, alloc_req_size, acl_desc_size;
	void *req_buf, *arb_payload;
	unsigned int nr_acl_entries = acl_desc->n_acl_entries;
	struct mem_buf_alloc_req *req;
	struct mem_buf_txn *txn = mem_buf_txn;
	int txn_id = txn->txn_id;

	alloc_req_size = offsetof(struct mem_buf_alloc_req,
				  acl_desc.acl_entries[nr_acl_entries]);
	tot_size = alloc_req_size +
		   mem_buf_get_mem_type_alloc_req_size(src_mem_type);

	req_buf = kzalloc(tot_size, GFP_KERNEL);
	if (!req_buf)
		return ERR_PTR(-ENOMEM);

	req = req_buf;
	req->hdr.txn_id = txn_id;
	req->hdr.msg_type = MEM_BUF_ALLOC_REQ;
	req->hdr.msg_size = tot_size;
	req->size = alloc_size;
	req->src_mem_type = src_mem_type;
	req->trans_type = trans_type;

	acl_desc_size = offsetof(struct gh_acl_desc, acl_entries[nr_acl_entries]);
	memcpy(&req->acl_desc, acl_desc, acl_desc_size);

	arb_payload = req_buf + alloc_req_size;
	mem_buf_populate_alloc_req_arb_payload(arb_payload, src_data, src_mem_type);

	trace_send_alloc_req(req);

	return req_buf;
}
EXPORT_SYMBOL(mem_buf_construct_alloc_req);

/*
 * mem_buf_construct_alloc_resp: Construct a response message to an allocation request.
 * @req_msg: The request message that is being replied to.
 * @alloc_ret: The return code of the allocation.
 * @memparcel_hdl: The memparcel handle that corresponds to the memory that was allocated
 * (i.e. via donation, sharing, or lending).
 * @obj_id: Uniquely identifies the allocated object.
 */
void *mem_buf_construct_alloc_resp(void *req_msg, s32 alloc_ret,
				   gh_memparcel_handle_t memparcel_hdl,
				   u32 obj_id)
{
	struct mem_buf_alloc_req *req = req_msg;
	struct mem_buf_alloc_resp *resp_msg = kzalloc(sizeof(*resp_msg), GFP_KERNEL);

	if (!resp_msg)
		return ERR_PTR(-ENOMEM);

	resp_msg->hdr.txn_id = req->hdr.txn_id;
	resp_msg->hdr.msg_type = MEM_BUF_ALLOC_RESP;
	resp_msg->hdr.msg_size = sizeof(*resp_msg);
	resp_msg->ret = alloc_ret;
	resp_msg->hdl = memparcel_hdl;
	resp_msg->obj_id = obj_id;

	return resp_msg;
}
EXPORT_SYMBOL(mem_buf_construct_alloc_resp);

/*
 * mem_buf_construct_relinquish_msg: Construct a relinquish message.
 * @mem_buf_txn: The transaction object.
 * @obj_id: Uniquely identifies an object.
 * @memparcel_hdl: The memparcel that corresponds to the memory that is being relinquished.
 */
void *mem_buf_construct_relinquish_msg(void *mem_buf_txn, u32 obj_id,
				       gh_memparcel_handle_t memparcel_hdl)
{
	struct mem_buf_alloc_relinquish *relinquish_msg;
	struct mem_buf_txn *txn = mem_buf_txn;

	relinquish_msg = kzalloc(sizeof(*relinquish_msg), GFP_KERNEL);
	if (!relinquish_msg)
		return ERR_PTR(-ENOMEM);

	relinquish_msg->hdr.msg_type = MEM_BUF_ALLOC_RELINQUISH;
	relinquish_msg->hdr.msg_size = sizeof(*relinquish_msg);
	relinquish_msg->hdr.txn_id = txn->txn_id;
	relinquish_msg->obj_id = obj_id;
	relinquish_msg->hdl = memparcel_hdl;

	return relinquish_msg;
}
EXPORT_SYMBOL(mem_buf_construct_relinquish_msg);

/*
 * mem_buf_construct_relinquish_resp: Construct a response to a relinquish message.
 * @_msg: The relinquish message being replied to.
 */
void *mem_buf_construct_relinquish_resp(void *_msg)
{
	struct mem_buf_alloc_relinquish *msg = _msg;
	struct mem_buf_alloc_relinquish *resp;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return ERR_PTR(-ENOMEM);

	resp->hdr.msg_type = MEM_BUF_ALLOC_RELINQUISH_RESP;
	resp->hdr.msg_size = sizeof(*resp);
	resp->hdr.txn_id = msg->hdr.txn_id;

	return resp;
}
EXPORT_SYMBOL(mem_buf_construct_relinquish_resp);
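
/*
 * mem_buf_retrieve_txn_id: Return the transaction ID associated with a
 * transaction allocated by mem_buf_init_txn().
 * @mem_buf_txn: The transaction object.
 */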
int mem_buf_retrieve_txn_id(void *mem_buf_txn)
{
	struct mem_buf_txn *txn = mem_buf_txn;

	return txn->txn_id;
}
EXPORT_SYMBOL(mem_buf_retrieve_txn_id);

/*
 * mem_buf_init_txn: Allocates a mem-buf transaction that is used in request-response
 * message pairs.
 * @mem_buf_msgq_hdl: The handle for the message queue that will be used to transmit the message.
 * @resp_buf: The buffer that will store the output of the response from the recipient of the
 * request.
 */
void *mem_buf_init_txn(void *mem_buf_msgq_hdl, void *resp_buf)
{
	struct mem_buf_txn *txn;
	struct mem_buf_msgq_desc *desc = mem_buf_msgq_hdl;
	int ret;

	txn = kzalloc(sizeof(*txn), GFP_KERNEL);
	if (!txn)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&desc->idr_mutex);
	ret = idr_alloc_cyclic(&desc->txn_idr, txn, 0, INT_MAX, GFP_KERNEL);
	if (ret < 0) {
		pr_err("%s: failed to allocate transaction id rc: %d\n", __func__, ret);
		mutex_unlock(&desc->idr_mutex);
		kfree(txn);
		return ERR_PTR(ret);
	}

	txn->txn_id = ret;
	init_completion(&txn->txn_done);
	txn->resp_buf = resp_buf;
	mutex_unlock(&desc->idr_mutex);

	return txn;
}
EXPORT_SYMBOL(mem_buf_init_txn);

/*
 * mem_buf_msgq_send: Send a mem-buf message over a particular message queue.
 * @mem_buf_msgq_hdl: The handle for the message queue that will be used to send the message.
 * @msg: The message to be sent. This must be a mem-buf allocation request, allocation
 * response, relinquish request, or relinquish response.
 */
int mem_buf_msgq_send(void *mem_buf_msgq_hdl, void *msg)
{
	struct mem_buf_msgq_desc *desc = mem_buf_msgq_hdl;
	struct mem_buf_msg_hdr *hdr = msg;
	int ret;

	if (!(hdr->msg_type >= MEM_BUF_ALLOC_REQ && hdr->msg_type < MEM_BUF_ALLOC_REQ_MAX)) {
		pr_err("%s: message type invalid\n", __func__);
		return -EINVAL;
	}

	ret = gh_msgq_send(desc->msgq_hdl, msg, hdr->msg_size, 0);
	if (ret < 0)
		pr_err("%s: failed to send message rc: %d\n", __func__, ret);
	else
		pr_debug("%s: message sent\n", __func__);

	return ret;
}
EXPORT_SYMBOL(mem_buf_msgq_send);

/*
 * mem_buf_txn_wait: Wait for a response to a particular request.
 * @mem_buf_msgq_hdl: The handle that corresponds to the message queue used for messaging.
 * @mem_buf_txn: A valid transaction which corresponds to a request that was sent.
 *
 * When this function returns successfully, the output of the response will be in the @resp_buf
 * parameter that was passed into mem_buf_init_txn().
 */
int mem_buf_txn_wait(void *mem_buf_msgq_hdl, void *mem_buf_txn)
{
	struct mem_buf_msgq_desc *desc = mem_buf_msgq_hdl;
	struct mem_buf_txn *txn = mem_buf_txn;
	int ret;

	pr_debug("%s: waiting for allocation response\n", __func__);
	ret = wait_for_completion_timeout(&txn->txn_done,
					  msecs_to_jiffies(MEM_BUF_TIMEOUT_MS));
	/*
	 * Recheck under the lock to handle the race where a message arrives
	 * immediately after timing out above, since complete() is called
	 * under idr_mutex.
	 */
	mutex_lock(&desc->idr_mutex);
	if (!ret && !try_wait_for_completion(&txn->txn_done)) {
		pr_err("%s: timed out waiting for allocation response\n", __func__);
		ret = -ETIMEDOUT;
	} else {
		pr_debug("%s: alloc response received\n", __func__);
		ret = 0;
	}
	idr_remove(&desc->txn_idr, txn->txn_id);
	mutex_unlock(&desc->idr_mutex);

	return ret ? ret : txn->txn_ret;
}
EXPORT_SYMBOL(mem_buf_txn_wait);
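
/*
 * Typical caller-side flow for a request/response exchange (an illustrative
 * sketch only; the real mem-buf clients may manage the request buffer and
 * @resp_buf differently):
 *
 *	txn = mem_buf_init_txn(msgq_hdl, resp_buf);
 *	msg = mem_buf_construct_alloc_req(txn, size, acl_desc, src_mem_type,
 *					  src_data, trans_type);
 *	ret = mem_buf_msgq_send(msgq_hdl, msg);
 *	if (!ret)
 *		ret = mem_buf_txn_wait(msgq_hdl, txn);
 *	kfree(msg);
 *	mem_buf_destroy_txn(msgq_hdl, txn);
 */
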
/*
 * mem_buf_destroy_txn: Releases all resources associated with a mem-buf transaction.
 * @mem_buf_msgq_hdl: The handle that corresponds to the message queue used for messaging.
 * @mem_buf_txn: The transaction structure that was involved in the messaging.
 */
void mem_buf_destroy_txn(void *mem_buf_msgq_hdl, void *mem_buf_txn)
{
	struct mem_buf_txn *txn = mem_buf_txn;

	kfree(txn);
}
EXPORT_SYMBOL(mem_buf_destroy_txn);
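
/*
 * mem_buf_process_alloc_resp: Handle an incoming MEM_BUF_ALLOC_RESP message.
 *
 * Looks up the transaction matching the response's txn_id, invokes the
 * client's alloc_resp_hdlr() callback to decode the response into the
 * transaction's resp_buf, and completes the transaction. If no matching
 * transaction exists (e.g. the requester already timed out) and the remote
 * allocation succeeded, the memparcel is relinquished so the memory is not
 * leaked.
 */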
static void mem_buf_process_alloc_resp(struct mem_buf_msgq_desc *desc, void *buf, size_t size)
{
	struct mem_buf_msg_hdr *hdr = buf;
	struct mem_buf_alloc_resp *alloc_resp = buf;
	struct mem_buf_txn *txn;
	unsigned int noreclaim_flag;

	mutex_lock(&desc->idr_mutex);
	noreclaim_flag = memalloc_noreclaim_save();
	txn = idr_find(&desc->txn_idr, hdr->txn_id);
	if (!txn) {
		pr_err("%s no txn associated with id: %d\n", __func__, hdr->txn_id);
		/*
		 * If this was a legitimate allocation, we should let the
		 * allocator know that the memory is not in use, so that
		 * it can be reclaimed.
		 */
		if (!alloc_resp->ret) {
			desc->msgq_ops->relinquish_memparcel_hdl(desc->hdlr_data,
								 alloc_resp->obj_id,
								 alloc_resp->hdl);
		}
	} else {
		txn->txn_ret = desc->msgq_ops->alloc_resp_hdlr(desc->hdlr_data, buf, size,
							       txn->resp_buf);
		complete(&txn->txn_done);
	}
	memalloc_noreclaim_restore(noreclaim_flag);
	mutex_unlock(&desc->idr_mutex);
}
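
/*
 * mem_buf_process_relinquish_resp: Handle an incoming
 * MEM_BUF_ALLOC_RELINQUISH_RESP message by completing the transaction that is
 * waiting on it, if one exists.
 */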
static void mem_buf_process_relinquish_resp(struct mem_buf_msgq_desc *desc,
					    void *buf, size_t size)
{
	struct mem_buf_txn *txn;
	struct mem_buf_alloc_relinquish *relinquish_resp_msg = buf;

	if (size != sizeof(*relinquish_resp_msg)) {
		pr_err("%s response received is not of correct size\n", __func__);
		return;
	}
	trace_receive_relinquish_resp_msg(relinquish_resp_msg);

	mutex_lock(&desc->idr_mutex);
	txn = idr_find(&desc->txn_idr, relinquish_resp_msg->hdr.txn_id);
	if (!txn)
		pr_err("%s no txn associated with id: %d\n", __func__,
		       relinquish_resp_msg->hdr.txn_id);
	else
		complete(&txn->txn_done);
	mutex_unlock(&desc->idr_mutex);
}
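
/*
 * mem_buf_process_msg: Validate the size of an incoming message and dispatch
 * it to the appropriate handler based on its message type.
 */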
static void mem_buf_process_msg(struct mem_buf_msgq_desc *desc, void *buf, size_t size)
{
	struct mem_buf_msg_hdr *hdr = buf;

	pr_debug("%s: mem-buf message received\n", __func__);
	if (size < sizeof(*hdr) || hdr->msg_size != size) {
		pr_err("%s: message received is not of a proper size: 0x%zx\n",
		       __func__, size);
		return;
	}

	switch (hdr->msg_type) {
	case MEM_BUF_ALLOC_REQ:
		desc->msgq_ops->alloc_req_hdlr(desc->hdlr_data, buf, size);
		break;
	case MEM_BUF_ALLOC_RESP:
		mem_buf_process_alloc_resp(desc, buf, size);
		break;
	case MEM_BUF_ALLOC_RELINQUISH:
		desc->msgq_ops->relinquish_hdlr(desc->hdlr_data, buf, size);
		break;
	case MEM_BUF_ALLOC_RELINQUISH_RESP:
		mem_buf_process_relinquish_resp(desc, buf, size);
		break;
	default:
		pr_err("%s: received message of unknown type: %d\n", __func__,
		       hdr->msg_type);
	}
}
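
/*
 * mem_buf_msgq_name_to_msgq_label: Translate a message queue name into the
 * corresponding Gunyah message queue label, or -EINVAL if the name is unknown.
 */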
static int mem_buf_msgq_name_to_msgq_label(const char *name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_buf_msgqs); i++)
		if (mem_buf_msgqs[i].name && !strcmp(name, mem_buf_msgqs[i].name))
			return mem_buf_msgqs[i].label;

	return -EINVAL;
}
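
/*
 * mem_buf_msgq_recv_fn: Receiver kthread. Blocks in gh_msgq_recv() and feeds
 * each received message to mem_buf_process_msg() until the thread is stopped.
 */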
static int mem_buf_msgq_recv_fn(void *data)
{
	struct mem_buf_msgq_desc *desc = data;
	void *buf;
	size_t size;
	int ret;

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		ret = gh_msgq_recv(desc->msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
		if (ret < 0)
			pr_err_ratelimited("%s failed to receive message rc: %d\n",
					   __func__, ret);
		else
			mem_buf_process_msg(desc, buf, size);
	}

	kfree(buf);
	return 0;
}
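
/*
 * mem_buf_msgq_register: Register a mem-buf message queue.
 * @msgq_name: The name of the message queue to register (e.g. "trusted_vm").
 * @info: The message handler callbacks and handler data to associate with the
 * message queue.
 *
 * On success, returns a handle that can be passed to the other mem-buf-msgq
 * APIs; on failure, returns an ERR_PTR()-encoded error.
 */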
void *mem_buf_msgq_register(const char *msgq_name, struct mem_buf_msgq_hdlr_info *info)
{
	struct mem_buf_msgq_desc *desc;
	int label;
	void *ret;

	if (!info || !info->msgq_ops || !info->msgq_ops->alloc_req_hdlr ||
	    !info->msgq_ops->alloc_resp_hdlr || !info->msgq_ops->relinquish_hdlr)
		return ERR_PTR(-EINVAL);

	label = mem_buf_msgq_name_to_msgq_label(msgq_name);
	if (label < 0)
		return ERR_PTR(label);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&desc->list);
	desc->msgq_ops = info->msgq_ops;
	desc->hdlr_data = info->hdlr_data;
	mutex_init(&desc->idr_mutex);
	idr_init(&desc->txn_idr);

	desc->msgq_hdl = gh_msgq_register(label);
	if (IS_ERR(desc->msgq_hdl)) {
		ret = desc->msgq_hdl;
		pr_err("Message queue registration failed: rc: %ld\n", PTR_ERR(desc->msgq_hdl));
		goto err_msgq_register;
	}

	mutex_lock(&mem_buf_msgq_list_lock);
	list_add_tail(&desc->list, &mem_buf_msgq_list);
	mutex_unlock(&mem_buf_msgq_list_lock);

	desc->recv_thr = kthread_run(mem_buf_msgq_recv_fn, desc, "mem_buf_%s_rcvr", msgq_name);
	if (IS_ERR(desc->recv_thr)) {
		ret = desc->recv_thr;
		pr_err("Failed to create msgq receiver thread rc: %ld\n", PTR_ERR(desc->recv_thr));
		goto err_thr_create;
	}

	return desc;

err_thr_create:
	mutex_lock(&mem_buf_msgq_list_lock);
	list_del(&desc->list);
	mutex_unlock(&mem_buf_msgq_list_lock);
	gh_msgq_unregister(desc->msgq_hdl);
err_msgq_register:
	idr_destroy(&desc->txn_idr);
	mutex_destroy(&desc->idr_mutex);
	kfree(desc);
	return ret;
}
EXPORT_SYMBOL(mem_buf_msgq_register);
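
/*
 * mem_buf_msgq_unregister: Tear down a message queue previously registered with
 * mem_buf_msgq_register(), stopping its receiver thread and releasing all
 * associated resources.
 * @mem_buf_msgq_hdl: The handle returned by mem_buf_msgq_register().
 */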
void mem_buf_msgq_unregister(void *mem_buf_msgq_hdl)
{
	struct mem_buf_msgq_desc *desc = mem_buf_msgq_hdl;

	kthread_stop(desc->recv_thr);

	mutex_lock(&mem_buf_msgq_list_lock);
	list_del(&desc->list);
	mutex_unlock(&mem_buf_msgq_list_lock);

	gh_msgq_unregister(desc->msgq_hdl);
	idr_destroy(&desc->txn_idr);
	mutex_destroy(&desc->idr_mutex);
	kfree(desc);
}
EXPORT_SYMBOL(mem_buf_msgq_unregister);

MODULE_LICENSE("GPL");