msm_memshare.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/notifier.h>
#include <linux/soc/qcom/qmi.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/qcom_scm.h>
#include "msm_memshare.h"
#include "heap_mem_ext_v01.h"
#include <soc/qcom/secure_buffer.h>
#include <trace/events/rproc_qcom.h>

/* Globals */
static unsigned long attrs;
static struct qmi_handle *mem_share_svc_handle;
static uint64_t bootup_request;

/* Memshare Driver Structure */
struct memshare_driver {
	struct device *dev;
	struct mutex mem_share;
	struct mutex mem_free;
	struct work_struct memshare_init_work;
};

struct memshare_child {
	struct device *dev;
	int client_id;
	struct qcom_glink_mem_entry *mem_entry;
};

static struct memshare_driver *memsh_drv;
static struct memshare_child *memsh_child[MAX_CLIENTS];
static struct mem_blocks memblock[MAX_CLIENTS];
static uint32_t num_clients;

static inline bool is_shared_mapping(struct mem_blocks *mb)
{
	if (!mb)
		return false;
	return mb->hyp_map_info.num_vmids > 1;
}

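/*
 * check_client() - Look up the memblock slot for a client_id/peripheral pair.
 *
 * Returns the matching index. If no entry exists and @request is zero, the
 * first free slot is claimed for the client instead. Returns
 * DHMS_MEM_CLIENT_INVALID when no slot could be found or claimed.
 */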
static int check_client(int client_id, int proc, int request)
{
	int i = 0;
	int found = DHMS_MEM_CLIENT_INVALID;

	for (i = 0; i < num_clients; i++) {
		if (memblock[i].client_id == client_id &&
				memblock[i].peripheral == proc) {
			found = i;
			break;
		}
	}
	if ((found == DHMS_MEM_CLIENT_INVALID) && !request) {
		dev_dbg(memsh_drv->dev,
			"memshare: No registered client for the client_id: %d, adding a new client\n",
			client_id);
		/* Add a new client */
		for (i = 0; i < MAX_CLIENTS; i++) {
			if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) {
				memblock[i].client_id = client_id;
				memblock[i].allotted = 0;
				memblock[i].guarantee = 0;
				memblock[i].peripheral = proc;
				found = i;
				break;
			}
		}
	}
	return found;
}

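/* Reset a memblock slot to its unallocated state. */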
static void free_client(int id)
{
	memblock[id].phy_addr = 0;
	memblock[id].virtual_addr = 0;
	memblock[id].allotted = 0;
	memblock[id].guarantee = 0;
	memblock[id].sequence_id = -1;
	memblock[id].memory_type = MEMORY_CMA;
}

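/*
 * Populate the QMI allocation response with the sequence id, physical
 * address and size recorded for the client; *flag signals allocation
 * failure and selects the QMI result/error codes.
 */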
static void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp,
	int id, int *flag)
{
	resp->sequence_id_valid = 1;
	resp->sequence_id = memblock[id].sequence_id;
	resp->dhms_mem_alloc_addr_info_valid = 1;
	resp->dhms_mem_alloc_addr_info_len = 1;
	resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr;
	resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size;
	if (!*flag) {
		resp->resp.result = QMI_RESULT_SUCCESS_V01;
		resp->resp.error = QMI_ERR_NONE_V01;
	} else {
		resp->resp.result = QMI_RESULT_FAILURE_V01;
		resp->resp.error = QMI_ERR_NO_MEMORY_V01;
	}
}

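/* Mark every memblock slot invalid before any client registers. */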
static void initialize_client(void)
{
	int i;

	for (i = 0; i < MAX_CLIENTS; i++) {
		memblock[i].allotted = 0;
		memblock[i].size = 0;
		memblock[i].guarantee = 0;
		memblock[i].phy_addr = 0;
		memblock[i].virtual_addr = 0;
		memblock[i].client_id = DHMS_MEM_CLIENT_INVALID;
		memblock[i].peripheral = -1;
		memblock[i].sequence_id = -1;
		memblock[i].memory_type = MEMORY_CMA;
		memblock[i].free_memory = 0;
		memblock[i].hyp_mapping = 0;
	}
}

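/*
 * SSR notifier for the modem: counts boot-up requests across shutdowns, and
 * on QCOM_SSR_AFTER_POWERUP returns non-guaranteed modem allocations to HLOS
 * (hypervisor unmap plus dma_free_attrs) once their free_memory count drops
 * to zero.
 */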
static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
	void *_cmd)
{
	u64 source_vmids = 0;
	int i, j, ret, size = 0;
	struct qcom_scm_vmperm dest_vmids[] = {{QCOM_SCM_VMID_HLOS,
				PERM_READ | PERM_WRITE | PERM_EXEC}};
	struct memshare_child *client_node = NULL;

	mutex_lock(&memsh_drv->mem_share);
	switch (code) {
	case QCOM_SSR_BEFORE_SHUTDOWN:
		trace_rproc_qcom_event("modem", "QCOM_SSR_BEFORE_SHUTDOWN", "modem_notifier-enter");
		bootup_request++;
		dev_info(memsh_drv->dev,
			"memshare: QCOM_SSR_BEFORE_SHUTDOWN: bootup_request:%llu\n",
			bootup_request);
		for (i = 0; i < MAX_CLIENTS; i++)
			memblock[i].alloc_request = 0;
		break;
	case QCOM_SSR_AFTER_SHUTDOWN:
		trace_rproc_qcom_event("modem", "QCOM_SSR_AFTER_SHUTDOWN", "modem_notifier-enter");
		break;
	case QCOM_SSR_BEFORE_POWERUP:
		trace_rproc_qcom_event("modem", "QCOM_SSR_BEFORE_POWERUP", "modem_notifier-enter");
		break;
	case QCOM_SSR_AFTER_POWERUP:
		trace_rproc_qcom_event("modem", "QCOM_SSR_AFTER_POWERUP", "modem_notifier-enter");
		dev_info(memsh_drv->dev, "memshare: QCOM_SSR_AFTER_POWERUP: Modem has booted up\n");
		for (i = 0; i < MAX_CLIENTS; i++) {
			client_node = memsh_child[i];
			size = memblock[i].size;
			if (memblock[i].free_memory > 0 &&
					bootup_request >= 2) {
				memblock[i].free_memory -= 1;
				dev_dbg(memsh_drv->dev, "memshare: free_memory count: %d for client id: %d\n",
					memblock[i].free_memory,
					memblock[i].client_id);
			}
			if (memblock[i].free_memory == 0 &&
					memblock[i].peripheral ==
					DHMS_MEM_PROC_MPSS_V01 &&
					!memblock[i].guarantee &&
					!memblock[i].client_request &&
					memblock[i].allotted &&
					!memblock[i].alloc_request) {
				dev_info(memsh_drv->dev,
					"memshare: hypervisor unmapping for allocated memory with client id: %d\n",
					memblock[i].client_id);
				if (memblock[i].hyp_mapping) {
					struct memshare_hyp_mapping *source;

					source = &memblock[i].hyp_map_info;
					for (j = 0; j < source->num_vmids; j++)
						source_vmids |= BIT(source->vmids[j]);
					ret = qcom_scm_assign_mem(
							memblock[i].phy_addr,
							memblock[i].size,
							&source_vmids,
							dest_vmids, 1);
					if (ret && memblock[i].hyp_mapping == 1) {
						/*
						 * This is an error case as the
						 * hyp mapping was successful
						 * earlier but the unmap here
						 * led to failure.
						 */
						dev_err(memsh_drv->dev,
							"memshare: failed to hypervisor unmap the memory region for client id: %d\n",
							memblock[i].client_id);
					} else {
						memblock[i].hyp_mapping = 0;
					}
				}
				if (memblock[i].guard_band) {
					/*
					 * The client requested guard band
					 * support, so the memory region of
					 * the client's size plus 4K of guard
					 * bytes is freed.
					 */
					size += MEMSHARE_GUARD_BYTES;
				}
				dma_free_attrs(client_node->dev,
					size, memblock[i].virtual_addr,
					memblock[i].phy_addr,
					attrs);
				free_client(i);
			}
		}
		bootup_request++;
		break;
	default:
		break;
	}
	mutex_unlock(&memsh_drv->mem_share);
	dev_info(memsh_drv->dev,
		"memshare: notifier_cb processed for code: %lu\n", code);
	trace_rproc_qcom_event("modem", "modem_notifier", "exit");
	return NOTIFY_DONE;
}

static struct notifier_block nb = {
	.notifier_call = modem_notifier_cb,
};

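/*
 * Assign the client's allocation from HLOS to the VMIDs/permissions in its
 * hyp_map_info via qcom_scm_assign_mem(), and record the mapping state.
 */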
static void shared_hyp_mapping(int index)
{
	u64 source_vmlist[] = {BIT(QCOM_SCM_VMID_HLOS)};
	struct memshare_hyp_mapping *dest;
	struct qcom_scm_vmperm *newvm;
	struct mem_blocks *mb;
	int ret, j;

	if (index >= MAX_CLIENTS) {
		dev_err(memsh_drv->dev,
			"memshare: hypervisor mapping failure for invalid client\n");
		return;
	}
	mb = &memblock[index];
	dest = &mb->hyp_map_info;
	newvm = kcalloc(dest->num_vmids, sizeof(struct qcom_scm_vmperm), GFP_KERNEL);
	if (!newvm)
		return;
	for (j = 0; j < dest->num_vmids; j++) {
		newvm[j].vmid = dest->vmids[j];
		newvm[j].perm = dest->perms[j];
	}
	ret = qcom_scm_assign_mem(mb->phy_addr, mb->size, source_vmlist,
			newvm, dest->num_vmids);
	kfree(newvm);
	if (ret != 0) {
		dev_err(memsh_drv->dev, "memshare: qcom_scm_assign_mem failed size: %u, err: %d\n",
			mb->size, ret);
		return;
	}
	mb->hyp_mapping = 1;
}

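/*
 * QMI handler for MEM_ALLOC_GENERIC_REQ_MSG_V01: validates the client,
 * allocates DMA memory (clamped to the client's init_size, plus an optional
 * guard band), registers a GLINK memory entry for shared mappings, performs
 * the hypervisor mapping and sends the allocation response.
 */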
static void handle_alloc_generic_req(struct qmi_handle *handle,
	struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg)
{
	struct mem_alloc_generic_req_msg_v01 *alloc_req;
	struct mem_alloc_generic_resp_msg_v01 *alloc_resp;
	struct memshare_child *client_node = NULL;
	int rc, resp = 0, i;
	int index = DHMS_MEM_CLIENT_INVALID;
	uint32_t size = 0;

	mutex_lock(&memsh_drv->mem_share);
	alloc_req = (struct mem_alloc_generic_req_msg_v01 *)decoded_msg;
	dev_info(memsh_drv->dev,
		"memshare_alloc: memory alloc request received for client id: %d, proc_id: %d, request size: %d\n",
		alloc_req->client_id, alloc_req->proc_id, alloc_req->num_bytes);
	alloc_resp = kzalloc(sizeof(*alloc_resp), GFP_KERNEL);
	if (!alloc_resp) {
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	alloc_resp->resp.result = QMI_RESULT_FAILURE_V01;
	alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01;
	index = check_client(alloc_req->client_id, alloc_req->proc_id, CHECK);
	if (index >= MAX_CLIENTS) {
		dev_err(memsh_drv->dev,
			"memshare_alloc: client not found for index: %d, requested client: %d, proc_id: %d\n",
			index, alloc_req->client_id, alloc_req->proc_id);
		kfree(alloc_resp);
		alloc_resp = NULL;
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	for (i = 0; i < num_clients; i++) {
		if (memsh_child[i]->client_id == alloc_req->client_id) {
			client_node = memsh_child[i];
			dev_info(memsh_drv->dev,
				"memshare_alloc: found client with client_id: %d, index: %d\n",
				alloc_req->client_id, index);
			break;
		}
	}
	if (!client_node) {
		dev_err(memsh_drv->dev,
			"memshare_alloc: No valid client node found\n");
		kfree(alloc_resp);
		alloc_resp = NULL;
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	if (!memblock[index].allotted && alloc_req->num_bytes > 0) {
		if (alloc_req->num_bytes > memblock[index].init_size)
			alloc_req->num_bytes = memblock[index].init_size;
		if (memblock[index].guard_band)
			size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES;
		else
			size = alloc_req->num_bytes;
		rc = memshare_alloc(client_node->dev, size, &memblock[index]);
		if (rc) {
			dev_err(memsh_drv->dev,
				"memshare_alloc: unable to allocate memory of size: %d for requested client\n",
				size);
			resp = 1;
		}
		if (!resp) {
			memblock[index].free_memory += 1;
			memblock[index].allotted = 1;
			memblock[index].size = alloc_req->num_bytes;
			memblock[index].peripheral = alloc_req->proc_id;
		}
	}
	if (is_shared_mapping(&memblock[index])) {
		struct mem_blocks *mb = &memblock[index];

		client_node->mem_entry = qcom_glink_mem_entry_init(client_node->dev,
				mb->virtual_addr, mb->phy_addr, mb->size, mb->phy_addr);
	}
	dev_dbg(memsh_drv->dev,
		"memshare_alloc: free memory count for client id: %d = %d\n",
		memblock[index].client_id, memblock[index].free_memory);
	memblock[index].sequence_id = alloc_req->sequence_id;
	memblock[index].alloc_request = 1;
	fill_alloc_response(alloc_resp, index, &resp);
	/*
	 * Perform the hypervisor mapping in order to avoid an XPU violation
	 * on the allocated region for modem clients.
	 */
	if (!memblock[index].hyp_mapping && memblock[index].allotted)
		shared_hyp_mapping(index);
	mutex_unlock(&memsh_drv->mem_share);
	dev_info(memsh_drv->dev,
		"memshare_alloc: client_id: %d, alloc_resp.num_bytes: %d, alloc_resp.resp.result: %lx\n",
		alloc_req->client_id,
		alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes,
		(unsigned long)alloc_resp->resp.result);
	rc = qmi_send_response(mem_share_svc_handle, sq, txn,
			MEM_ALLOC_GENERIC_RESP_MSG_V01,
			sizeof(struct mem_alloc_generic_resp_msg_v01),
			mem_alloc_generic_resp_msg_data_v01_ei, alloc_resp);
	if (rc < 0)
		dev_err(memsh_drv->dev,
			"memshare_alloc: Error sending the alloc response: %d\n",
			rc);
	kfree(alloc_resp);
	alloc_resp = NULL;
}

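/*
 * QMI handler for MEM_FREE_GENERIC_REQ_MSG_V01: for non-guaranteed clients,
 * unmaps the region back to HLOS, frees the DMA allocation (including any
 * guard band) and resets the client slot before sending the response.
 */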
static void handle_free_generic_req(struct qmi_handle *handle,
	struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg)
{
	u64 source_vmids = 0;
	struct mem_free_generic_req_msg_v01 *free_req;
	struct mem_free_generic_resp_msg_v01 free_resp;
	struct memshare_child *client_node = NULL;
	int rc, flag = 0, ret = 0, size = 0, i, j;
	int index = DHMS_MEM_CLIENT_INVALID;
	struct qcom_scm_vmperm dest_vmids[] = {{QCOM_SCM_VMID_HLOS,
				PERM_READ | PERM_WRITE | PERM_EXEC}};

	mutex_lock(&memsh_drv->mem_free);
	free_req = (struct mem_free_generic_req_msg_v01 *)decoded_msg;
	memset(&free_resp, 0, sizeof(free_resp));
	free_resp.resp.error = QMI_ERR_INTERNAL_V01;
	free_resp.resp.result = QMI_RESULT_FAILURE_V01;
	dev_info(memsh_drv->dev,
		"memshare_free: handling memory free request with client id: %d, proc_id: %d\n",
		free_req->client_id, free_req->proc_id);
	index = check_client(free_req->client_id, free_req->proc_id, FREE);
	if (index >= MAX_CLIENTS) {
		dev_err(memsh_drv->dev, "memshare_free: invalid client request to free memory\n");
		flag = 1;
	}
	for (i = 0; i < num_clients; i++) {
		if (memsh_child[i]->client_id == free_req->client_id) {
			client_node = memsh_child[i];
			dev_info(memsh_drv->dev,
				"memshare_free: found client with client_id: %d, index: %d\n",
				free_req->client_id, index);
			break;
		}
	}
	if (!client_node) {
		dev_err(memsh_drv->dev,
			"memshare_free: No valid client node found\n");
		mutex_unlock(&memsh_drv->mem_free);
		return;
	}
	if (client_node->mem_entry) {
		qcom_glink_mem_entry_free(client_node->mem_entry);
		client_node->mem_entry = NULL;
	}
	if (!flag && !memblock[index].guarantee &&
			!memblock[index].client_request &&
			memblock[index].allotted) {
		struct memshare_hyp_mapping *source;

		dev_dbg(memsh_drv->dev,
			"memshare_free: hypervisor unmapping for free_req->client_id: %d - size: %d\n",
			free_req->client_id, memblock[index].size);
		source = &memblock[index].hyp_map_info;
		for (j = 0; j < source->num_vmids; j++)
			source_vmids |= BIT(source->vmids[j]);
		ret = qcom_scm_assign_mem(memblock[index].phy_addr, memblock[index].size,
				&source_vmids, dest_vmids, 1);
		if (ret && memblock[index].hyp_mapping == 1) {
			/*
			 * This is an error case as the hyp mapping was
			 * successful earlier but the unmap here led to failure.
			 */
			dev_err(memsh_drv->dev,
				"memshare_free: failed to unmap the region for client id:%d\n",
				index);
		}
		size = memblock[index].size;
		if (memblock[index].guard_band) {
			/*
			 * The client requested guard band support, so the
			 * memory region of the client's size plus 4K of
			 * guard bytes is freed.
			 */
			size += MEMSHARE_GUARD_BYTES;
		}
		dma_free_attrs(client_node->dev, size,
			memblock[index].virtual_addr,
			memblock[index].phy_addr,
			attrs);
		free_client(index);
	} else {
		dev_err(memsh_drv->dev,
			"memshare_free: cannot free the memory for a guaranteed client (client index: %d)\n",
			index);
	}
	if (flag) {
		free_resp.resp.result = QMI_RESULT_FAILURE_V01;
		free_resp.resp.error = QMI_ERR_INVALID_ID_V01;
	} else {
		free_resp.resp.result = QMI_RESULT_SUCCESS_V01;
		free_resp.resp.error = QMI_ERR_NONE_V01;
	}
	mutex_unlock(&memsh_drv->mem_free);
	rc = qmi_send_response(mem_share_svc_handle, sq, txn,
			MEM_FREE_GENERIC_RESP_MSG_V01,
			MEM_FREE_REQ_MAX_MSG_LEN_V01,
			mem_free_generic_resp_msg_data_v01_ei, &free_resp);
	if (rc < 0)
		dev_err(memsh_drv->dev,
			"memshare_free: error sending the free response: %d\n", rc);
}

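/*
 * QMI handler for MEM_QUERY_SIZE_REQ_MSG_V01: reports the init_size
 * configured for the client in device tree (0 when none is set).
 */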
static void handle_query_size_req(struct qmi_handle *handle,
	struct sockaddr_qrtr *sq, struct qmi_txn *txn, const void *decoded_msg)
{
	int rc, index = DHMS_MEM_CLIENT_INVALID;
	struct mem_query_size_req_msg_v01 *query_req;
	struct mem_query_size_rsp_msg_v01 *query_resp;

	mutex_lock(&memsh_drv->mem_share);
	query_req = (struct mem_query_size_req_msg_v01 *)decoded_msg;
	query_resp = kzalloc(sizeof(*query_resp), GFP_KERNEL);
	if (!query_resp) {
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	dev_dbg(memsh_drv->dev,
		"memshare_query: query on available memory size for client id: %d, proc_id: %d\n",
		query_req->client_id, query_req->proc_id);
	index = check_client(query_req->client_id, query_req->proc_id, CHECK);
	if (index >= MAX_CLIENTS) {
		dev_err(memsh_drv->dev,
			"memshare_query: client not found, requested client: %d, proc_id: %d\n",
			query_req->client_id, query_req->proc_id);
		kfree(query_resp);
		query_resp = NULL;
		mutex_unlock(&memsh_drv->mem_share);
		return;
	}
	query_resp->size_valid = 1;
	if (memblock[index].init_size)
		query_resp->size = memblock[index].init_size;
	else
		query_resp->size = 0;
	query_resp->resp.result = QMI_RESULT_SUCCESS_V01;
	query_resp->resp.error = QMI_ERR_NONE_V01;
	mutex_unlock(&memsh_drv->mem_share);
	dev_info(memsh_drv->dev,
		"memshare_query: client_id: %d, query_resp.size: %d, query_resp.resp.result: %lx\n",
		query_req->client_id, query_resp->size,
		(unsigned long)query_resp->resp.result);
	rc = qmi_send_response(mem_share_svc_handle, sq, txn,
			MEM_QUERY_SIZE_RESP_MSG_V01,
			MEM_QUERY_MAX_MSG_LEN_V01,
			mem_query_size_resp_msg_data_v01_ei, query_resp);
	if (rc < 0)
		dev_err(memsh_drv->dev,
			"memshare_query: Error sending the query response: %d\n", rc);
	kfree(query_resp);
	query_resp = NULL;
}

static void mem_share_svc_disconnect_cb(struct qmi_handle *qmi,
	unsigned int node, unsigned int port)
{
}

static struct qmi_ops server_ops = {
	.del_client = mem_share_svc_disconnect_cb,
};

static struct qmi_msg_handler qmi_memshare_handlers[] = {
	{
		.type = QMI_REQUEST,
		.msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01,
		.ei = mem_alloc_generic_req_msg_data_v01_ei,
		.decoded_size = sizeof(struct mem_alloc_generic_req_msg_v01),
		.fn = handle_alloc_generic_req,
	},
	{
		.type = QMI_REQUEST,
		.msg_id = MEM_FREE_GENERIC_REQ_MSG_V01,
		.ei = mem_free_generic_req_msg_data_v01_ei,
		.decoded_size = sizeof(struct mem_free_generic_req_msg_v01),
		.fn = handle_free_generic_req,
	},
	{
		.type = QMI_REQUEST,
		.msg_id = MEM_QUERY_SIZE_REQ_MSG_V01,
		.ei = mem_query_size_req_msg_data_v01_ei,
		.decoded_size = sizeof(struct mem_query_size_req_msg_v01),
		.fn = handle_query_size_req,
	},
	{}
};

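/*
 * Allocate @block_size bytes of DMA memory for @pblk on behalf of the
 * client device; fills pblk->virtual_addr and pblk->phy_addr.
 */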
int memshare_alloc(struct device *dev,
		unsigned int block_size,
		struct mem_blocks *pblk)
{
	dev_dbg(memsh_drv->dev,
		"memshare: allocation request for size: %d", block_size);
	if (!pblk) {
		dev_err(memsh_drv->dev,
			"memshare: Failed memory block allocation\n");
		return -ENOMEM;
	}
	pblk->virtual_addr = dma_alloc_attrs(dev, block_size,
			&pblk->phy_addr, GFP_KERNEL, attrs);
	if (pblk->virtual_addr == NULL)
		return -ENOMEM;
	return 0;
}

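/*
 * Deferred QMI setup: create the handle and register the memshare service
 * (MEM_SHARE_SERVICE_SVC_ID) so clients can send alloc/free/query requests.
 */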
static void memshare_init_worker(struct work_struct *work)
{
	int rc;

	mem_share_svc_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
	if (!mem_share_svc_handle)
		return;
	rc = qmi_handle_init(mem_share_svc_handle,
			sizeof(struct qmi_elem_info),
			&server_ops, qmi_memshare_handlers);
	if (rc < 0) {
		dev_err(memsh_drv->dev,
			"memshare: Creating mem_share_svc qmi handle failed\n");
		kfree(mem_share_svc_handle);
		mem_share_svc_handle = NULL;
		return;
	}
	rc = qmi_add_server(mem_share_svc_handle, MEM_SHARE_SERVICE_SVC_ID,
			MEM_SHARE_SERVICE_VERS, MEM_SHARE_SERVICE_INS_ID);
	if (rc < 0) {
		dev_err(memsh_drv->dev,
			"memshare: Registering mem share svc failed %d\n", rc);
		qmi_handle_release(mem_share_svc_handle);
		kfree(mem_share_svc_handle);
		mem_share_svc_handle = NULL;
		return;
	}
	dev_dbg(memsh_drv->dev, "memshare: memshare_init successful\n");
}

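/*
 * Probe one peripheral client node: parse its size, client id, label and
 * mapping properties from device tree, optionally attach a reserved memory
 * region, and pre-allocate plus hyp-map memory for boot-time (guaranteed)
 * clients.
 */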
static int memshare_child_probe(struct platform_device *pdev)
{
	int rc;
	uint32_t size, client_id;
	const char *name;
	struct memshare_child *drv;
	struct device_node *mem_node;

	/* Guard against overflowing the fixed-size client tables. */
	if (num_clients >= MAX_CLIENTS) {
		dev_err(&pdev->dev, "memshare: Maximum number of clients reached\n");
		return -EINVAL;
	}
	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	drv->dev = &pdev->dev;
	platform_set_drvdata(pdev, drv);
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size",
			&size);
	if (rc) {
		dev_err(drv->dev, "memshare: Error reading size of clients, rc: %d\n",
			rc);
		return rc;
	}
	rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id",
			&client_id);
	if (rc) {
		dev_err(drv->dev, "memshare: Error reading client id, rc: %d\n",
			rc);
		return rc;
	}
	memblock[num_clients].guarantee = of_property_read_bool(pdev->dev.of_node,
			"qcom,allocate-boot-time");
	memblock[num_clients].client_request = of_property_read_bool(pdev->dev.of_node,
			"qcom,allocate-on-request");
	memblock[num_clients].guard_band = of_property_read_bool(pdev->dev.of_node,
			"qcom,guard-band");
	/* If the shared property is set, allow access from both HLOS and the peripheral */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,shared")) {
		memblock[num_clients].hyp_map_info.num_vmids = 2;
		memblock[num_clients].hyp_map_info.vmids[0] = VMID_HLOS;
		memblock[num_clients].hyp_map_info.vmids[1] = VMID_MSS_MSA;
		memblock[num_clients].hyp_map_info.perms[0] = PERM_READ | PERM_WRITE;
		memblock[num_clients].hyp_map_info.perms[1] = PERM_READ | PERM_WRITE;
	} else {
		memblock[num_clients].hyp_map_info.num_vmids = 1;
		memblock[num_clients].hyp_map_info.vmids[0] = VMID_MSS_MSA;
		memblock[num_clients].hyp_map_info.perms[0] = PERM_READ | PERM_WRITE;
	}
	rc = of_property_read_string(pdev->dev.of_node, "label", &name);
	if (rc) {
		dev_err(drv->dev, "memshare: Error reading peripheral info for client, rc: %d\n",
			rc);
		return rc;
	}
	if (strcmp(name, "modem") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01;
	else if (strcmp(name, "adsp") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01;
	else if (strcmp(name, "wcnss") == 0)
		memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01;
	memblock[num_clients].init_size = size;
	memblock[num_clients].client_id = client_id;
	drv->client_id = client_id;
	mem_node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
	of_node_put(mem_node);
	if (mem_node) {
		rc = of_reserved_mem_device_init(&pdev->dev);
		if (rc) {
			dev_err(&pdev->dev, "memshare: Failed to initialize memory region rc: %d\n",
				rc);
			return rc;
		}
		dev_info(&pdev->dev, "memshare: Memory allocation from shared DMA pool\n");
	} else {
		dev_info(&pdev->dev, "memshare: Continuing with allocation from CMA\n");
	}
	/* Memshare allocation for guaranteed clients */
	if (memblock[num_clients].guarantee && size > 0) {
		if (memblock[num_clients].guard_band)
			size += MEMSHARE_GUARD_BYTES;
		rc = memshare_alloc(drv->dev, size, &memblock[num_clients]);
		if (rc) {
			dev_err(drv->dev,
				"memshare_child: Unable to allocate memory for guaranteed clients, rc: %d\n",
				rc);
			if (mem_node)
				of_reserved_mem_device_release(&pdev->dev);
			return rc;
		}
		memblock[num_clients].size = size;
		memblock[num_clients].allotted = 1;
		shared_hyp_mapping(num_clients);
	}
	memsh_child[num_clients] = drv;
	num_clients++;
	return 0;
}

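/*
 * Probe the parent memshare node: set up driver state and the QMI init
 * worker, populate the child (peripheral) devices and register for modem
 * SSR notifications. Child nodes are routed to memshare_child_probe().
 */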
static int memshare_probe(struct platform_device *pdev)
{
	int rc;
	struct memshare_driver *drv;
	struct device *dev = &pdev->dev;

	if (of_device_is_compatible(dev->of_node, "qcom,memshare-peripheral"))
		return memshare_child_probe(pdev);
	drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;
	drv->dev = &pdev->dev;
	memsh_drv = drv;
	platform_set_drvdata(pdev, memsh_drv);
	mutex_init(&drv->mem_free);
	mutex_init(&drv->mem_share);
	INIT_WORK(&drv->memshare_init_work, memshare_init_worker);
	schedule_work(&drv->memshare_init_work);
	initialize_client();
	num_clients = 0;
	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (rc) {
		dev_err(memsh_drv->dev,
			"memshare: error populating the devices\n");
		return rc;
	}
	qcom_register_ssr_notifier("modem", &nb);
	dev_dbg(memsh_drv->dev, "memshare: Memshare inited\n");
	return 0;
}

static int memshare_remove(struct platform_device *pdev)
{
	if (!memsh_drv)
		return 0;
	if (mem_share_svc_handle) {
		qmi_handle_release(mem_share_svc_handle);
		kfree(mem_share_svc_handle);
		mem_share_svc_handle = NULL;
	}
	return 0;
}

static const struct of_device_id memshare_match_table[] = {
	{ .compatible = "qcom,memshare", },
	{ .compatible = "qcom,memshare-peripheral", },
	{}
};
MODULE_DEVICE_TABLE(of, memshare_match_table);

static struct platform_driver memshare_pdriver = {
	.probe = memshare_probe,
	.remove = memshare_remove,
	.driver = {
		.name = "memshare",
		.of_match_table = memshare_match_table,
	},
};
module_platform_driver(memshare_pdriver);

MODULE_DESCRIPTION("Mem Share QMI Service Driver");
MODULE_LICENSE("GPL");