msm_audio_ion_vm.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/err.h>
  10. #include <linux/delay.h>
  11. #include <linux/slab.h>
  12. #include <linux/mutex.h>
  13. #include <linux/list.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/dma-buf.h>
  16. #include <linux/iosys-map.h>
  17. #include <linux/iommu.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/of_device.h>
  20. #include <linux/export.h>
  21. #include <ipc/apr.h>
  22. #include <dsp/msm_audio_ion.h>
  23. #include <linux/habmm.h>
/* Bit in device_status: set once platform probe completed successfully */
#define MSM_AUDIO_ION_PROBED (1 << 0)

/* DMA/IOVA address of a mapped buffer, taken from the first SG entry */
#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
	alloc_data->table->sgl->dma_address

/* Command ids exchanged with the backend VM over the HAB socket */
#define MSM_AUDIO_SMMU_VM_CMD_MAP 0x00000001
#define MSM_AUDIO_SMMU_VM_CMD_UNMAP 0x00000002
#define MSM_AUDIO_SMMU_VM_HAB_MINOR_ID 1

/* Driver-wide state shared by all clients of this module */
struct msm_audio_ion_private {
	bool smmu_enabled;		/* from "qcom,smmu-enabled" DT prop */
	struct device *cb_dev;		/* context bank device (regular) */
	struct device *cb_cma_dev;	/* context bank device (CMA) */
	u8 device_status;		/* MSM_AUDIO_ION_PROBED flag */
	struct list_head alloc_list;	/* all live msm_audio_alloc_data */
	struct mutex list_mutex;	/* protects alloc_list */
};

/* Per-buffer bookkeeping, one entry per mapped dma_buf */
struct msm_audio_alloc_data {
	size_t len;			/* buffer size in bytes */
	struct iosys_map *vmap;		/* kernel vmap, if mapped */
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;		/* SG table from map_attachment */
	struct list_head list;		/* link on alloc_list */
	u32 export_id;			/* HAB export id (SMMU-VM mode) */
};

/* Wire format of the MAP request sent to the backend VM */
struct msm_audio_smmu_vm_map_cmd {
	int cmd_id;
	u32 export_id;
	u32 buf_size;
};

/* MAP response: status plus the IOVA assigned by the backend */
struct msm_audio_smmu_vm_map_cmd_rsp {
	int status;
	u64 addr;
};

/* Wire format of the UNMAP request */
struct msm_audio_smmu_vm_unmap_cmd {
	int cmd_id;
	u32 export_id;
};

/* UNMAP response carries only a status code */
struct msm_audio_smmu_vm_unmap_cmd_rsp {
	int status;
};

static struct msm_audio_ion_private msm_audio_ion_data = {0,};
/* HAB socket handle opened at probe time (SMMU mode only) */
static u32 msm_audio_ion_hab_handle;
  65. static void msm_audio_ion_add_allocation(
  66. struct msm_audio_ion_private *msm_audio_ion_data,
  67. struct msm_audio_alloc_data *alloc_data)
  68. {
  69. /*
  70. * Since these APIs can be invoked by multiple
  71. * clients, there is need to make sure the list
  72. * of allocations is always protected
  73. */
  74. mutex_lock(&(msm_audio_ion_data->list_mutex));
  75. list_add_tail(&(alloc_data->list),
  76. &(msm_audio_ion_data->alloc_list));
  77. mutex_unlock(&(msm_audio_ion_data->list_mutex));
  78. }
/*
 * Attach and DMA-map @dma_buf on the (CMA or regular) context bank
 * device and record the mapping on the global allocation list.
 *
 * @dma_buf: buffer to attach and map
 * @addr:    out - DMA address of the first scatterlist entry
 * @len:     out - total buffer size in bytes
 * @cma_mem: true to map via the CMA context bank device
 *
 * Returns 0 on success or a negative errno; on failure nothing is
 * left attached and no list entry is created.
 */
static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
				 dma_addr_t *addr, size_t *len,
				 bool cma_mem)
{
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct device *cb_dev;
	unsigned long ionflag = 0;
	int rc = 0;

	if (cma_mem)
		cb_dev = msm_audio_ion_data.cb_cma_dev;
	else
		cb_dev = msm_audio_ion_data.cb_dev;

	/* Data required per buffer mapping */
	alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	alloc_data->dma_buf = dma_buf;
	alloc_data->len = dma_buf->size;
	*len = dma_buf->size;

	/* Attach the dma_buf to context bank device */
	alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
					    cb_dev);
	if (IS_ERR(alloc_data->attach)) {
		rc = PTR_ERR(alloc_data->attach);
		dev_err(cb_dev,
			"%s: Fail to attach dma_buf to CB, rc = %d\n",
			__func__, rc);
		goto free_alloc_data;
	}

	/* For uncached buffers, avoid cache maintanance */
	rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
	if (rc) {
		dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}
	if (!(ionflag & ION_FLAG_CACHED))
		alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	/*
	 * Get the scatter-gather list.
	 * There is no info as this is a write buffer or
	 * read buffer, hence the request is bi-directional
	 * to accommodate both read and write mappings.
	 */
	alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
						   DMA_BIDIRECTIONAL);
	if (IS_ERR(alloc_data->table)) {
		rc = PTR_ERR(alloc_data->table);
		dev_err(cb_dev,
			"%s: Fail to map attachment, rc = %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}

	/* physical address from mapping */
	*addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);

	msm_audio_ion_add_allocation(&msm_audio_ion_data,
				     alloc_data);
	return rc;

detach_dma_buf:
	dma_buf_detach(alloc_data->dma_buf,
		       alloc_data->attach);
free_alloc_data:
	kfree(alloc_data);
	alloc_data = NULL;
	return rc;
}
  145. static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf, bool cma_mem)
  146. {
  147. int rc = 0;
  148. struct msm_audio_alloc_data *alloc_data = NULL;
  149. struct list_head *ptr, *next;
  150. struct device *cb_dev;
  151. bool found = false;
  152. if (cma_mem)
  153. cb_dev = msm_audio_ion_data.cb_cma_dev;
  154. else
  155. cb_dev = msm_audio_ion_data.cb_dev;
  156. /*
  157. * Though list_for_each_safe is delete safe, lock
  158. * should be explicitly acquired to avoid race condition
  159. * on adding elements to the list.
  160. */
  161. mutex_lock(&(msm_audio_ion_data.list_mutex));
  162. list_for_each_safe(ptr, next,
  163. &(msm_audio_ion_data.alloc_list)) {
  164. alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
  165. list);
  166. if (alloc_data->dma_buf == dma_buf) {
  167. found = true;
  168. dma_buf_unmap_attachment(alloc_data->attach,
  169. alloc_data->table,
  170. DMA_BIDIRECTIONAL);
  171. dma_buf_detach(alloc_data->dma_buf,
  172. alloc_data->attach);
  173. dma_buf_put(alloc_data->dma_buf);
  174. list_del(&(alloc_data->list));
  175. kfree(alloc_data);
  176. alloc_data = NULL;
  177. break;
  178. }
  179. }
  180. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  181. if (!found) {
  182. dev_err(cb_dev,
  183. "%s: cannot find allocation, dma_buf %pK",
  184. __func__, dma_buf);
  185. rc = -EINVAL;
  186. }
  187. return rc;
  188. }
/*
 * Ask the backend (physical) VM to create an SMMU mapping for
 * @dma_buf.  The buffer must already be on the allocation list.  It is
 * exported over HAB, a MAP command is sent, and the IOVA from the
 * response is returned through @paddr.  The export id is remembered in
 * the allocation entry so a later unmap can unexport it.
 *
 * Returns 0 on success, a negative errno, or the backend's non-zero
 * status code.  On mid-sequence failure the export is rolled back.
 */
static int msm_audio_ion_smmu_map(struct dma_buf *dma_buf,
				  dma_addr_t *paddr, size_t *len)
{
	int rc;
	u32 export_id;
	u32 cmd_rsp_size;
	bool found = false;
	bool exported = false;
	struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
	struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
	struct msm_audio_alloc_data *alloc_data = NULL;
	/* overall deadline (~500 ms) for the response retry loop below */
	unsigned long delay = jiffies + (HZ / 2);

	*len = dma_buf->size;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			found = true;

			/* Export the buffer to physical VM */
			rc = habmm_export(msm_audio_ion_hab_handle, dma_buf, *len,
					  &export_id, HABMM_EXPIMP_FLAGS_DMABUF);
			if (rc) {
				pr_err("%s: habmm_export failed dma_buf = %pK, len = %zd, rc = %d\n",
				       __func__, dma_buf, *len, rc);
				goto err;
			}
			exported = true;

			smmu_map_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_MAP;
			smmu_map_cmd.export_id = export_id;
			smmu_map_cmd.buf_size = *len;

			rc = habmm_socket_send(msm_audio_ion_hab_handle,
					       (void *)&smmu_map_cmd, sizeof(smmu_map_cmd), 0);
			if (rc) {
				pr_err("%s: habmm_socket_send failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* retry receive while interrupted, until the deadline */
			do {
				cmd_rsp_size = sizeof(cmd_rsp);
				rc = habmm_socket_recv(msm_audio_ion_hab_handle,
						       (void *)&cmd_rsp,
						       &cmd_rsp_size,
						       0xFFFFFFFF,
						       0);
			} while (time_before(jiffies, delay) && (rc == -EINTR) &&
				 (cmd_rsp_size == 0));
			if (rc) {
				pr_err("%s: habmm_socket_recv failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* malformed response: wrong size */
			if (cmd_rsp_size != sizeof(cmd_rsp)) {
				pr_err("%s: invalid size for cmd rsp %u, expected %zu\n",
				       __func__, cmd_rsp_size, sizeof(cmd_rsp));
				rc = -EIO;
				goto err;
			}

			/* backend rejected the map request */
			if (cmd_rsp.status) {
				pr_err("%s: SMMU map command failed %d\n",
				       __func__, cmd_rsp.status);
				rc = cmd_rsp.status;
				goto err;
			}

			/* IOVA assigned by the backend VM */
			*paddr = (dma_addr_t)cmd_rsp.addr;
			alloc_data->export_id = export_id;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		pr_err("%s: cannot find allocation, dma_buf %pK", __func__, dma_buf);
		return -EINVAL;
	}
	return 0;

err:
	/* roll back the HAB export if the command sequence failed mid-way */
	if (exported)
		(void)habmm_unexport(msm_audio_ion_hab_handle, export_id, 0);

	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return rc;
}
/*
 * Ask the backend VM to tear down the SMMU mapping for @dma_buf and
 * unexport the buffer from HAB.
 *
 * Returns 0 on success, -EINVAL when the buffer is not on the
 * allocation list, a negative errno on transport failure, or the
 * backend's non-zero status.  On mid-sequence failure the allocation
 * entry is force-unexported, unlinked, and freed (err label).
 */
static int msm_audio_ion_smmu_unmap(struct dma_buf *dma_buf)
{
	int rc;
	bool found = false;
	u32 cmd_rsp_size;
	struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
	struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
	struct msm_audio_alloc_data *alloc_data, *next;
	/* overall deadline (~500 ms) for the response retry loop below */
	unsigned long delay = jiffies + (HZ / 2);

	/*
	 * Though list_for_each_entry_safe is delete safe, lock
	 * should be explicitly acquired to avoid race condition
	 * on adding elements to the list.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry_safe(alloc_data, next,
				 &(msm_audio_ion_data.alloc_list), list) {
		if (alloc_data->dma_buf == dma_buf) {
			found = true;

			smmu_unmap_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_UNMAP;
			smmu_unmap_cmd.export_id = alloc_data->export_id;

			rc = habmm_socket_send(msm_audio_ion_hab_handle,
					       (void *)&smmu_unmap_cmd,
					       sizeof(smmu_unmap_cmd), 0);
			if (rc) {
				pr_err("%s: habmm_socket_send failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* retry receive while interrupted, until the deadline */
			do {
				cmd_rsp_size = sizeof(cmd_rsp);
				rc = habmm_socket_recv(msm_audio_ion_hab_handle,
						       (void *)&cmd_rsp,
						       &cmd_rsp_size,
						       0xFFFFFFFF,
						       0);
			} while (time_before(jiffies, delay) &&
				 (rc == -EINTR) && (cmd_rsp_size == 0));
			if (rc) {
				pr_err("%s: habmm_socket_recv failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* malformed response: wrong size */
			if (cmd_rsp_size != sizeof(cmd_rsp)) {
				pr_err("%s: invalid size for cmd rsp %u\n",
				       __func__, cmd_rsp_size);
				rc = -EIO;
				goto err;
			}

			/* backend rejected the unmap request */
			if (cmd_rsp.status) {
				pr_err("%s: SMMU unmap command failed %d\n",
				       __func__, cmd_rsp.status);
				rc = cmd_rsp.status;
				goto err;
			}

			/* unmap done; release the HAB export (best effort) */
			rc = habmm_unexport(msm_audio_ion_hab_handle,
					    alloc_data->export_id, 0xFFFFFFFF);
			if (rc) {
				pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
				       __func__, alloc_data->export_id, rc);
			}
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		pr_err("%s: cannot find allocation, dma_buf %pK\n", __func__, dma_buf);
		rc = -EINVAL;
	}
	return rc;

err:
	/*
	 * Command sequence failed mid-way: force-unexport and drop the
	 * allocation entry so it cannot be retried with stale state.
	 * NOTE(review): freeing here means a later
	 * msm_audio_dma_buf_unmap() for this dma_buf will not find the
	 * entry and the dma_buf reference is not put on this path.
	 */
	if (found) {
		(void)habmm_unexport(msm_audio_ion_hab_handle,
				     alloc_data->export_id, 0xFFFFFFFF);
		list_del(&(alloc_data->list));
		kfree(alloc_data);
	}

	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return rc;
}
  349. static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
  350. dma_addr_t *addr, size_t *len)
  351. {
  352. int rc = 0;
  353. rc = msm_audio_dma_buf_map(dma_buf, addr, len, false);
  354. if (rc) {
  355. pr_err("%s: failed to map DMA buf, err = %d\n",
  356. __func__, rc);
  357. goto err;
  358. }
  359. pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
  360. err:
  361. return rc;
  362. }
  363. static int msm_audio_ion_map_kernel(struct dma_buf *dma_buf,
  364. struct iosys_map *iosys_vmap)
  365. {
  366. int rc = 0;
  367. struct msm_audio_alloc_data *alloc_data = NULL;
  368. rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  369. if (rc) {
  370. pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
  371. goto exit;
  372. }
  373. rc = dma_buf_vmap(dma_buf, iosys_vmap);
  374. if (rc) {
  375. pr_err("%s: kernel mapping of dma_buf failed\n",
  376. __func__);
  377. goto exit;
  378. }
  379. /*
  380. * TBD: remove the below section once new API
  381. * for mapping kernel virtual address is available.
  382. */
  383. mutex_lock(&(msm_audio_ion_data.list_mutex));
  384. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  385. list) {
  386. if (alloc_data->dma_buf == dma_buf) {
  387. alloc_data->vmap = iosys_vmap;
  388. break;
  389. }
  390. }
  391. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  392. exit:
  393. return rc;
  394. }
  395. static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
  396. {
  397. int rc = 0;
  398. struct iosys_map *iosys_vmap = NULL
  399. struct msm_audio_alloc_data *alloc_data = NULL;
  400. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  401. /*
  402. * TBD: remove the below section once new API
  403. * for unmapping kernel virtual address is available.
  404. */
  405. mutex_lock(&(msm_audio_ion_data.list_mutex));
  406. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  407. list) {
  408. if (alloc_data->dma_buf == dma_buf) {
  409. iosys_vmap = alloc_data->vmap;
  410. break;
  411. }
  412. }
  413. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  414. if (!iosys_vmap) {
  415. dev_err(cb_dev,
  416. "%s: cannot find allocation for dma_buf %pK",
  417. __func__, dma_buf);
  418. rc = -EINVAL;
  419. goto err;
  420. }
  421. dma_buf_vunmap(dma_buf, iosys_vmap);
  422. rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  423. if (rc) {
  424. dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
  425. __func__);
  426. goto err;
  427. }
  428. err:
  429. return rc;
  430. }
  431. static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
  432. size_t *plen, struct iosys_map *iosys_vmap)
  433. {
  434. int rc = 0;
  435. if (!dma_buf || !paddr || !vaddr || !plen) {
  436. pr_err("%s: Invalid params\n", __func__);
  437. return -EINVAL;
  438. }
  439. rc = msm_audio_ion_get_phys(dma_buf, paddr, plen);
  440. if (rc) {
  441. pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
  442. __func__, rc);
  443. dma_buf_put(dma_buf);
  444. goto err;
  445. }
  446. rc = msm_audio_ion_map_kernel(dma_buf, iosys_vmap);
  447. if (rc) {
  448. pr_err("%s: ION memory mapping for AUDIO failed, err:%d\n",
  449. __func__, rc);
  450. rc = -ENOMEM;
  451. msm_audio_dma_buf_unmap(dma_buf, false);
  452. goto err;
  453. }
  454. if (msm_audio_ion_data.smmu_enabled) {
  455. rc = msm_audio_ion_smmu_map(dma_buf, paddr, plen);
  456. if (rc) {
  457. pr_err("%s: failed to do smmu map, err = %d\n",
  458. __func__, rc);
  459. msm_audio_dma_buf_unmap(dma_buf, false);
  460. goto err;
  461. }
  462. }
  463. err:
  464. return rc;
  465. }
  466. /**
  467. * msm_audio_ion_alloc -
  468. * Allocs ION memory for given client name
  469. *
  470. * @dma_buf: dma_buf for the ION memory
  471. * @bufsz: buffer size
  472. * @paddr: Physical address to be assigned with allocated region
  473. * @plen: length of allocated region to be assigned
  474. * @iosys_vmap: Virtual mapping vmap pointer to be assigned
  475. *
  476. * Returns 0 on success or error on failure
  477. */
  478. int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
  479. dma_addr_t *paddr, size_t *plen, struct iosys_map *iosys_vmap)
  480. {
  481. int rc = -EINVAL;
  482. unsigned long err_ion_ptr = 0;
  483. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  484. pr_debug("%s:probe is not done, deferred\n", __func__);
  485. return -EPROBE_DEFER;
  486. }
  487. if (!dma_buf || !paddr || !bufsz || !plen) {
  488. pr_err("%s: Invalid params\n", __func__);
  489. return -EINVAL;
  490. }
  491. pr_debug("%s: audio heap is used\n", __func__);
  492. if (msm_audio_ion_data.smmu_enabled == true) {
  493. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
  494. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  495. if (IS_ERR((void *)(*dma_buf)))
  496. err_ion_ptr = PTR_ERR((int *)(*dma_buf));
  497. pr_debug("%s: ION alloc failed for audio heap err ptr=%ld, smmu_enabled=%d,"
  498. "trying system heap..\n",
  499. __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
  500. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
  501. }
  502. } else {
  503. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
  504. }
  505. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  506. if (IS_ERR((void *)(*dma_buf)))
  507. err_ion_ptr = PTR_ERR((int *)(*dma_buf));
  508. pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
  509. __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
  510. rc = -ENOMEM;
  511. goto err;
  512. }
  513. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, iosys_vmap);
  514. if (rc) {
  515. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  516. goto err;
  517. }
  518. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  519. iosys_vmap->vaddr, bufsz);
  520. memset(iosys_vmap, 0, sizeof(struct iosys_map));
  521. err:
  522. return rc;
  523. }
  524. EXPORT_SYMBOL(msm_audio_ion_alloc);
  525. int msm_audio_ion_phys_free(void *handle,
  526. dma_addr_t *paddr,
  527. size_t *pa_len,
  528. u8 assign_type,
  529. int id,
  530. int key)
  531. {
  532. handle = NULL;
  533. return 0;
  534. }
  535. EXPORT_SYMBOL(msm_audio_ion_phys_free);
  536. int msm_audio_ion_phys_assign(void **handle, int fd,
  537. dma_addr_t *paddr, size_t *pa_len, u8 assign_type, int id)
  538. {
  539. *handle = NULL;
  540. return 0;
  541. }
  542. EXPORT_SYMBOL(msm_audio_ion_phys_assign);
  543. bool msm_audio_is_hypervisor_supported(void)
  544. {
  545. return false;
  546. }
  547. EXPORT_SYMBOL(msm_audio_is_hypervisor_supported);
  548. /**
  549. * msm_audio_ion_import-
  550. * Import ION buffer with given file descriptor
  551. *
  552. * @dma_buf: dma_buf for the ION memory
  553. * @fd: file descriptor for the ION memory
  554. * @ionflag: flags associated with ION buffer
  555. * @bufsz: buffer size
  556. * @paddr: Physical address to be assigned with allocated region
  557. * @plen: length of allocated region to be assigned
  558. * @iosys_vmap: Virtual mapping vmap pointer to be assigned
  559. *
  560. * Returns 0 on success or error on failure
  561. */
  562. int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
  563. unsigned long *ionflag, size_t bufsz,
  564. dma_addr_t *paddr, size_t *plen, struct iosys_map *iosys_vmap)
  565. {
  566. int rc = 0;
  567. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  568. pr_debug("%s: probe is not done, deferred\n", __func__);
  569. return -EPROBE_DEFER;
  570. }
  571. if (!dma_buf || !paddr || !plen) {
  572. pr_err("%s: Invalid params\n", __func__);
  573. return -EINVAL;
  574. }
  575. /* bufsz should be 0 and fd shouldn't be 0 as of now */
  576. *dma_buf = dma_buf_get(fd);
  577. pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
  578. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  579. pr_err("%s: dma_buf_get failed\n", __func__);
  580. rc = -EINVAL;
  581. goto err;
  582. }
  583. if (ionflag != NULL) {
  584. rc = dma_buf_get_flags(*dma_buf, ionflag);
  585. if (rc) {
  586. pr_err("%s: could not get flags for the dma_buf\n",
  587. __func__);
  588. goto err_ion_flag;
  589. }
  590. }
  591. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, iosys_vmap);
  592. if (rc) {
  593. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  594. goto err;
  595. }
  596. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  597. iosys_vmap->vaddr, bufsz);
  598. return 0;
  599. err_ion_flag:
  600. dma_buf_put(*dma_buf);
  601. err:
  602. *dma_buf = NULL;
  603. return rc;
  604. }
  605. EXPORT_SYMBOL(msm_audio_ion_import);
  606. /**
  607. * msm_audio_ion_import_cma-
  608. * Import ION buffer with given file descriptor
  609. *
  610. * @dma_buf: dma_buf for the ION memory
  611. * @fd: file descriptor for the ION memory
  612. * @ionflag: flags associated with ION buffer
  613. * @bufsz: buffer size
  614. * @paddr: Physical address to be assigned with allocated region
  615. * @plen: length of allocated region to be assigned
  616. * @vaddr: virtual address to be assigned
  617. *
  618. * Returns 0 on success or error on failure
  619. */
  620. int msm_audio_ion_import_cma(struct dma_buf **dma_buf, int fd,
  621. unsigned long *ionflag, size_t bufsz,
  622. dma_addr_t *paddr, size_t *plen, void **vaddr)
  623. {
  624. int rc = 0;
  625. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  626. pr_debug("%s: probe is not done, deferred\n", __func__);
  627. return -EPROBE_DEFER;
  628. }
  629. if (!dma_buf || !paddr || !vaddr || !plen ||
  630. !msm_audio_ion_data.cb_cma_dev) {
  631. pr_err("%s: Invalid params\n", __func__);
  632. return -EINVAL;
  633. }
  634. /* bufsz should be 0 and fd shouldn't be 0 as of now */
  635. *dma_buf = dma_buf_get(fd);
  636. pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
  637. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  638. pr_err("%s: dma_buf_get failed\n", __func__);
  639. rc = -EINVAL;
  640. goto err;
  641. }
  642. if (ionflag != NULL) {
  643. rc = dma_buf_get_flags(*dma_buf, ionflag);
  644. if (rc) {
  645. pr_err("%s: could not get flags for the dma_buf\n",
  646. __func__);
  647. goto err_ion_flag;
  648. }
  649. }
  650. msm_audio_dma_buf_map(*dma_buf, paddr, plen, true);
  651. return 0;
  652. err_ion_flag:
  653. dma_buf_put(*dma_buf);
  654. err:
  655. *dma_buf = NULL;
  656. return rc;
  657. }
  658. EXPORT_SYMBOL(msm_audio_ion_import_cma);
  659. /**
  660. * msm_audio_ion_free -
  661. * fress ION memory for given client and handle
  662. *
  663. * @dma_buf: dma_buf for the ION memory
  664. *
  665. * Returns 0 on success or error on failure
  666. */
  667. int msm_audio_ion_free(struct dma_buf *dma_buf)
  668. {
  669. int ret = 0;
  670. if (!dma_buf) {
  671. pr_err("%s: dma_buf invalid\n", __func__);
  672. return -EINVAL;
  673. }
  674. ret = msm_audio_ion_unmap_kernel(dma_buf);
  675. if (ret)
  676. return ret;
  677. if (msm_audio_ion_data.smmu_enabled) {
  678. ret = msm_audio_ion_smmu_unmap(dma_buf);
  679. if (ret)
  680. pr_err("%s: smmu unmap failed with ret %d\n",
  681. __func__, ret);
  682. }
  683. msm_audio_dma_buf_unmap(dma_buf, false);
  684. return 0;
  685. }
  686. EXPORT_SYMBOL(msm_audio_ion_free);
  687. /**
  688. * msm_audio_ion_free_cma -
  689. * fress ION memory for given client and handle
  690. *
  691. * @dma_buf: dma_buf for the ION memory
  692. *
  693. * Returns 0 on success or error on failure
  694. */
  695. int msm_audio_ion_free_cma(struct dma_buf *dma_buf)
  696. {
  697. if (!dma_buf) {
  698. pr_err("%s: dma_buf invalid\n", __func__);
  699. return -EINVAL;
  700. }
  701. msm_audio_dma_buf_unmap(dma_buf, true);
  702. return 0;
  703. }
  704. EXPORT_SYMBOL(msm_audio_ion_free_cma);
  705. /**
  706. * msm_audio_ion_mmap -
  707. * Audio ION memory map
  708. *
  709. * @abuff: audio buf pointer
  710. * @vma: virtual mem area
  711. *
  712. * Returns 0 on success or error on failure
  713. */
  714. int msm_audio_ion_mmap(struct audio_buffer *abuff,
  715. struct vm_area_struct *vma)
  716. {
  717. struct msm_audio_alloc_data *alloc_data = NULL;
  718. struct sg_table *table;
  719. unsigned long addr = vma->vm_start;
  720. unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
  721. struct scatterlist *sg;
  722. unsigned int i;
  723. struct page *page;
  724. int ret = 0;
  725. bool found = false;
  726. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  727. mutex_lock(&(msm_audio_ion_data.list_mutex));
  728. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  729. list) {
  730. if (alloc_data->dma_buf == abuff->dma_buf) {
  731. found = true;
  732. table = alloc_data->table;
  733. break;
  734. }
  735. }
  736. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  737. if (!found) {
  738. dev_err(cb_dev,
  739. "%s: cannot find allocation, dma_buf %pK",
  740. __func__, abuff->dma_buf);
  741. return -EINVAL;
  742. }
  743. /* uncached */
  744. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  745. /* We need to check if a page is associated with this sg list because:
  746. * If the allocation came from a carveout we currently don't have
  747. * pages associated with carved out memory. This might change in the
  748. * future and we can remove this check and the else statement.
  749. */
  750. page = sg_page(table->sgl);
  751. if (page) {
  752. pr_debug("%s: page is NOT null\n", __func__);
  753. for_each_sg(table->sgl, sg, table->nents, i) {
  754. unsigned long remainder = vma->vm_end - addr;
  755. unsigned long len = sg->length;
  756. page = sg_page(sg);
  757. if (offset >= len) {
  758. offset -= len;
  759. continue;
  760. } else if (offset) {
  761. page += offset / PAGE_SIZE;
  762. len -= offset;
  763. offset = 0;
  764. }
  765. len = min(len, remainder);
  766. pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
  767. vma, (unsigned int)addr, len,
  768. (unsigned int)vma->vm_start,
  769. (unsigned int)vma->vm_end,
  770. (unsigned long)pgprot_val(vma->vm_page_prot));
  771. remap_pfn_range(vma, addr, page_to_pfn(page), len,
  772. vma->vm_page_prot);
  773. addr += len;
  774. if (addr >= vma->vm_end)
  775. return 0;
  776. }
  777. } else {
  778. pr_debug("%s: page is NULL\n", __func__);
  779. ret = -EINVAL;
  780. }
  781. return ret;
  782. }
  783. EXPORT_SYMBOL(msm_audio_ion_mmap);
/**
 * msm_audio_populate_upper_32_bits -
 *        retrieve upper 32bits of 64bit address
 *
 * @pa: 64bit physical address
 *
 * Uses the kernel's upper_32_bits() helper, which is safe even when
 * dma_addr_t is 32 bits wide (no undefined full-width shift).
 */
u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
{
	return upper_32_bits(pa);
}
EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);
/* DT compatibles: one node for the regular CB device, one for CMA */
static const struct of_device_id msm_audio_ion_dt_match[] = {
	{ .compatible = "qcom,msm-audio-ion" },
	{ .compatible = "qcom,msm-audio-ion-cma"},
	{ }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
  802. static int msm_audio_ion_probe(struct platform_device *pdev)
  803. {
  804. int rc = 0;
  805. const char *msm_audio_ion_dt = "qcom,smmu-enabled";
  806. bool smmu_enabled;
  807. struct device *dev = &pdev->dev;
  808. if (dev->of_node == NULL) {
  809. dev_err(dev,
  810. "%s: device tree is not found\n",
  811. __func__);
  812. msm_audio_ion_data.smmu_enabled = 0;
  813. return 0;
  814. }
  815. if (of_device_is_compatible(dev->of_node, "qcom,msm-audio-ion-cma")) {
  816. msm_audio_ion_data.cb_cma_dev = dev;
  817. return 0;
  818. }
  819. smmu_enabled = of_property_read_bool(dev->of_node,
  820. msm_audio_ion_dt);
  821. msm_audio_ion_data.smmu_enabled = smmu_enabled;
  822. if (!smmu_enabled) {
  823. dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
  824. goto exit;
  825. }
  826. rc = habmm_socket_open(&msm_audio_ion_hab_handle,
  827. HAB_MMID_CREATE(MM_AUD_3,
  828. MSM_AUDIO_SMMU_VM_HAB_MINOR_ID),
  829. 0xFFFFFFFF,
  830. HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
  831. if (rc) {
  832. dev_err(dev, "%s: habmm_socket_open failed %d\n",
  833. __func__, rc);
  834. return rc;
  835. }
  836. dev_info(dev, "%s: msm_audio_ion_hab_handle %x\n",
  837. __func__, msm_audio_ion_hab_handle);
  838. INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
  839. mutex_init(&(msm_audio_ion_data.list_mutex));
  840. exit:
  841. if (!rc)
  842. msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
  843. msm_audio_ion_data.cb_dev = dev;
  844. return rc;
  845. }
  846. static int msm_audio_ion_remove(struct platform_device *pdev)
  847. {
  848. if (msm_audio_ion_data.smmu_enabled) {
  849. if (msm_audio_ion_hab_handle)
  850. habmm_socket_close(msm_audio_ion_hab_handle);
  851. mutex_destroy(&(msm_audio_ion_data.list_mutex));
  852. }
  853. msm_audio_ion_data.smmu_enabled = 0;
  854. msm_audio_ion_data.device_status = 0;
  855. return 0;
  856. }
/* Platform driver binding for both DT compatibles above */
static struct platform_driver msm_audio_ion_driver = {
	.driver = {
		.name = "msm-audio-ion",
		.owner = THIS_MODULE,
		.of_match_table = msm_audio_ion_dt_match,
	},
	.probe = msm_audio_ion_probe,
	.remove = msm_audio_ion_remove,
};
/* Module entry point: register the platform driver. */
int __init msm_audio_ion_init(void)
{
	return platform_driver_register(&msm_audio_ion_driver);
}

/* Module exit point: unregister the platform driver. */
void msm_audio_ion_exit(void)
{
	platform_driver_unregister(&msm_audio_ion_driver);
}

MODULE_DESCRIPTION("MSM Audio ION VM module");
MODULE_LICENSE("GPL v2");