msm_audio_ion_vm.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/init.h>
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/err.h>
  10. #include <linux/delay.h>
  11. #include <linux/slab.h>
  12. #include <linux/mutex.h>
  13. #include <linux/list.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/dma-buf.h>
  16. #include <linux/dma-buf-map.h>
  17. #include <linux/iommu.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/of_device.h>
  20. #include <linux/export.h>
  21. #include <ipc/apr.h>
  22. #include <dsp/msm_audio_ion.h>
  23. #include <linux/habmm.h>
  24. #define MSM_AUDIO_ION_PROBED (1 << 0)
  25. #define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
  26. alloc_data->table->sgl->dma_address
  27. #define MSM_AUDIO_SMMU_VM_CMD_MAP 0x00000001
  28. #define MSM_AUDIO_SMMU_VM_CMD_UNMAP 0x00000002
  29. #define MSM_AUDIO_SMMU_VM_HAB_MINOR_ID 1
/* Driver-wide singleton state shared by all audio ION operations. */
struct msm_audio_ion_private {
	bool smmu_enabled;		/* DT "qcom,smmu-enabled" property */
	struct device *cb_dev;		/* context-bank device for dma_buf attach */
	struct device *cb_cma_dev;	/* device from "qcom,msm-audio-ion-cma" node */
	u8 device_status;		/* MSM_AUDIO_ION_PROBED after successful probe */
	struct list_head alloc_list;	/* tracked msm_audio_alloc_data entries */
	struct mutex list_mutex;	/* protects alloc_list */
};
/* Per-buffer bookkeeping, one entry per mapped dma_buf on alloc_list. */
struct msm_audio_alloc_data {
	size_t len;			/* buffer size in bytes (dma_buf->size) */
	struct dma_buf_map *vmap;	/* kernel vmap descriptor, set by map_kernel */
	struct dma_buf *dma_buf;	/* the buffer itself; list lookup key */
	struct dma_buf_attachment *attach;	/* attachment to the CB device */
	struct sg_table *table;		/* sg table from dma_buf_map_attachment */
	struct list_head list;		/* linkage into alloc_list */
	u32 export_id;			/* HAB export id; valid after smmu_map */
};
/* Wire format of the MAP request sent to the SMMU VM over HAB. */
struct msm_audio_smmu_vm_map_cmd {
	int cmd_id;	/* MSM_AUDIO_SMMU_VM_CMD_MAP */
	u32 export_id;	/* id returned by habmm_export() */
	u32 buf_size;	/* buffer length in bytes */
};

/* Wire format of the MAP response. */
struct msm_audio_smmu_vm_map_cmd_rsp {
	int status;	/* 0 on success, error code otherwise */
	u64 addr;	/* device address the VM mapped the buffer at */
};

/* Wire format of the UNMAP request. */
struct msm_audio_smmu_vm_unmap_cmd {
	int cmd_id;	/* MSM_AUDIO_SMMU_VM_CMD_UNMAP */
	u32 export_id;	/* export id previously mapped */
};

/* Wire format of the UNMAP response. */
struct msm_audio_smmu_vm_unmap_cmd_rsp {
	int status;	/* 0 on success, error code otherwise */
};
/* Singleton driver state; zero until msm_audio_ion_probe() fills it in. */
static struct msm_audio_ion_private msm_audio_ion_data = {0,};
/* HAB socket to the SMMU VM backend; only valid when smmu_enabled. */
static u32 msm_audio_ion_hab_handle;
  65. static void msm_audio_ion_add_allocation(
  66. struct msm_audio_ion_private *msm_audio_ion_data,
  67. struct msm_audio_alloc_data *alloc_data)
  68. {
  69. /*
  70. * Since these APIs can be invoked by multiple
  71. * clients, there is need to make sure the list
  72. * of allocations is always protected
  73. */
  74. mutex_lock(&(msm_audio_ion_data->list_mutex));
  75. list_add_tail(&(alloc_data->list),
  76. &(msm_audio_ion_data->alloc_list));
  77. mutex_unlock(&(msm_audio_ion_data->list_mutex));
  78. }
/*
 * msm_audio_dma_buf_map - attach a dma_buf to the audio context bank
 * and map it for DMA.
 * @dma_buf: buffer to map
 * @addr: out; DMA address of the first sg entry
 * @len: out; buffer size in bytes
 * @cma_mem: true to use the CMA context-bank device
 *
 * On success the allocation is recorded on the driver list (ownership
 * of the kzalloc'd record transfers to the list; it is freed by
 * msm_audio_dma_buf_unmap). Returns 0 or a negative errno.
 */
static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
				 dma_addr_t *addr, size_t *len,
				 bool cma_mem)
{
	struct msm_audio_alloc_data *alloc_data;
	struct device *cb_dev;
	unsigned long ionflag = 0;
	int rc = 0;

	/* Pick the context-bank device this buffer attaches to. */
	if (cma_mem)
		cb_dev = msm_audio_ion_data.cb_cma_dev;
	else
		cb_dev = msm_audio_ion_data.cb_dev;

	/* Data required per buffer mapping */
	alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	alloc_data->dma_buf = dma_buf;
	alloc_data->len = dma_buf->size;
	*len = dma_buf->size;

	/* Attach the dma_buf to context bank device */
	alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
					    cb_dev);
	if (IS_ERR(alloc_data->attach)) {
		rc = PTR_ERR(alloc_data->attach);
		dev_err(cb_dev,
			"%s: Fail to attach dma_buf to CB, rc = %d\n",
			__func__, rc);
		goto free_alloc_data;
	}

	/* For uncached buffers, avoid cache maintanance */
	rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
	if (rc) {
		dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}
	if (!(ionflag & ION_FLAG_CACHED))
		alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	/*
	 * Get the scatter-gather list.
	 * There is no info as this is a write buffer or
	 * read buffer, hence the request is bi-directional
	 * to accommodate both read and write mappings.
	 */
	alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
						   DMA_BIDIRECTIONAL);
	if (IS_ERR(alloc_data->table)) {
		rc = PTR_ERR(alloc_data->table);
		dev_err(cb_dev,
			"%s: Fail to map attachment, rc = %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}

	/* physical address from mapping */
	*addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);

	msm_audio_ion_add_allocation(&msm_audio_ion_data,
				     alloc_data);
	return rc;

detach_dma_buf:
	dma_buf_detach(alloc_data->dma_buf,
		       alloc_data->attach);
free_alloc_data:
	kfree(alloc_data);
	return rc;
}
  144. static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf, bool cma_mem)
  145. {
  146. int rc = 0;
  147. struct msm_audio_alloc_data *alloc_data = NULL;
  148. struct list_head *ptr, *next;
  149. struct device *cb_dev;
  150. bool found = false;
  151. if (cma_mem)
  152. cb_dev = msm_audio_ion_data.cb_cma_dev;
  153. else
  154. cb_dev = msm_audio_ion_data.cb_dev;
  155. /*
  156. * Though list_for_each_safe is delete safe, lock
  157. * should be explicitly acquired to avoid race condition
  158. * on adding elements to the list.
  159. */
  160. mutex_lock(&(msm_audio_ion_data.list_mutex));
  161. list_for_each_safe(ptr, next,
  162. &(msm_audio_ion_data.alloc_list)) {
  163. alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
  164. list);
  165. if (alloc_data->dma_buf == dma_buf) {
  166. found = true;
  167. dma_buf_unmap_attachment(alloc_data->attach,
  168. alloc_data->table,
  169. DMA_BIDIRECTIONAL);
  170. dma_buf_detach(alloc_data->dma_buf,
  171. alloc_data->attach);
  172. dma_buf_put(alloc_data->dma_buf);
  173. list_del(&(alloc_data->list));
  174. kfree(alloc_data);
  175. break;
  176. }
  177. }
  178. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  179. if (!found) {
  180. dev_err(cb_dev,
  181. "%s: cannot find allocation, dma_buf %pK",
  182. __func__, dma_buf);
  183. rc = -EINVAL;
  184. }
  185. return rc;
  186. }
/*
 * msm_audio_ion_smmu_map - ask the SMMU VM (over HAB) to map a buffer.
 * @dma_buf: buffer, must already be tracked on alloc_list
 * @paddr: out; device address returned by the VM
 * @len: out; buffer size in bytes
 *
 * Exports the dma_buf through HAB, sends a MAP command and waits up to
 * ~HZ/2 for the response. On success the export id is remembered on
 * the allocation record for the later unmap. Returns 0, -EINVAL if the
 * buffer is not tracked, or a negative error / VM status code.
 */
static int msm_audio_ion_smmu_map(struct dma_buf *dma_buf,
				  dma_addr_t *paddr, size_t *len)
{
	int rc;
	u32 export_id;
	u32 cmd_rsp_size;
	bool found = false;
	bool exported = false;
	struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
	struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
	struct msm_audio_alloc_data *alloc_data = NULL;
	/* deadline for retrying interrupted receives (~500 ms) */
	unsigned long delay = jiffies + (HZ / 2);

	*len = dma_buf->size;

	/* Entire export + command exchange runs under the list lock. */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			found = true;

			/* Export the buffer to physical VM */
			rc = habmm_export(msm_audio_ion_hab_handle, dma_buf, *len,
					  &export_id, HABMM_EXPIMP_FLAGS_DMABUF);
			if (rc) {
				pr_err("%s: habmm_export failed dma_buf = %pK, len = %zd, rc = %d\n",
				       __func__, dma_buf, *len, rc);
				goto err;
			}
			exported = true;

			smmu_map_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_MAP;
			smmu_map_cmd.export_id = export_id;
			smmu_map_cmd.buf_size = *len;
			rc = habmm_socket_send(msm_audio_ion_hab_handle,
					       (void *)&smmu_map_cmd,
					       sizeof(smmu_map_cmd), 0);
			if (rc) {
				pr_err("%s: habmm_socket_send failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* Retry the blocking receive while interrupted
			 * and nothing has arrived, until the deadline. */
			do {
				cmd_rsp_size = sizeof(cmd_rsp);
				rc = habmm_socket_recv(msm_audio_ion_hab_handle,
						       (void *)&cmd_rsp,
						       &cmd_rsp_size,
						       0xFFFFFFFF,
						       0);
			} while (time_before(jiffies, delay) && (rc == -EINTR) &&
				 (cmd_rsp_size == 0));
			if (rc) {
				pr_err("%s: habmm_socket_recv failed %d\n",
				       __func__, rc);
				goto err;
			}
			if (cmd_rsp_size != sizeof(cmd_rsp)) {
				pr_err("%s: invalid size for cmd rsp %u, expected %zu\n",
				       __func__, cmd_rsp_size, sizeof(cmd_rsp));
				rc = -EIO;
				goto err;
			}
			if (cmd_rsp.status) {
				pr_err("%s: SMMU map command failed %d\n",
				       __func__, cmd_rsp.status);
				rc = cmd_rsp.status;
				goto err;
			}

			*paddr = (dma_addr_t)cmd_rsp.addr;
			alloc_data->export_id = export_id;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		pr_err("%s: cannot find allocation, dma_buf %pK", __func__, dma_buf);
		return -EINVAL;
	}
	return 0;

err:
	/* Undo the HAB export if the command exchange failed after it. */
	if (exported)
		(void)habmm_unexport(msm_audio_ion_hab_handle, export_id, 0);
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return rc;
}
/*
 * msm_audio_ion_smmu_unmap - ask the SMMU VM (over HAB) to unmap a
 * previously mapped buffer and withdraw its HAB export.
 * @dma_buf: buffer, must be tracked on alloc_list with a valid export_id
 *
 * Returns 0 on success, -EINVAL if the buffer is not tracked, or a
 * negative error / VM status code. On a failed command exchange the
 * allocation record is unexported, unlinked and freed (err path).
 */
static int msm_audio_ion_smmu_unmap(struct dma_buf *dma_buf)
{
	int rc;
	bool found = false;
	u32 cmd_rsp_size;
	struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
	struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
	struct msm_audio_alloc_data *alloc_data, *next;
	/* deadline for retrying interrupted receives (~500 ms) */
	unsigned long delay = jiffies + (HZ / 2);

	/*
	 * Though list_for_each_entry_safe is delete safe, lock
	 * should be explicitly acquired to avoid race condition
	 * on adding elements to the list.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry_safe(alloc_data, next,
				 &(msm_audio_ion_data.alloc_list), list) {
		if (alloc_data->dma_buf == dma_buf) {
			found = true;

			smmu_unmap_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_UNMAP;
			smmu_unmap_cmd.export_id = alloc_data->export_id;
			rc = habmm_socket_send(msm_audio_ion_hab_handle,
					       (void *)&smmu_unmap_cmd,
					       sizeof(smmu_unmap_cmd), 0);
			if (rc) {
				pr_err("%s: habmm_socket_send failed %d\n",
				       __func__, rc);
				goto err;
			}

			/* Retry the blocking receive while interrupted
			 * and nothing has arrived, until the deadline. */
			do {
				cmd_rsp_size = sizeof(cmd_rsp);
				rc = habmm_socket_recv(msm_audio_ion_hab_handle,
						       (void *)&cmd_rsp,
						       &cmd_rsp_size,
						       0xFFFFFFFF,
						       0);
			} while (time_before(jiffies, delay) &&
				 (rc == -EINTR) && (cmd_rsp_size == 0));
			if (rc) {
				pr_err("%s: habmm_socket_recv failed %d\n",
				       __func__, rc);
				goto err;
			}
			if (cmd_rsp_size != sizeof(cmd_rsp)) {
				pr_err("%s: invalid size for cmd rsp %u\n",
				       __func__, cmd_rsp_size);
				rc = -EIO;
				goto err;
			}
			if (cmd_rsp.status) {
				pr_err("%s: SMMU unmap command failed %d\n",
				       __func__, cmd_rsp.status);
				rc = cmd_rsp.status;
				goto err;
			}

			/* VM unmapped it; withdraw the HAB export too. */
			rc = habmm_unexport(msm_audio_ion_hab_handle,
					    alloc_data->export_id, 0xFFFFFFFF);
			if (rc) {
				pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
				       __func__, alloc_data->export_id, rc);
			}
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		pr_err("%s: cannot find allocation, dma_buf %pK\n", __func__, dma_buf);
		rc = -EINVAL;
	}
	return rc;

err:
	/* Command exchange failed: best-effort unexport and drop the record. */
	if (found) {
		(void)habmm_unexport(msm_audio_ion_hab_handle,
				     alloc_data->export_id, 0xFFFFFFFF);
		list_del(&(alloc_data->list));
		kfree(alloc_data);
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return rc;
}
  347. static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
  348. dma_addr_t *addr, size_t *len)
  349. {
  350. int rc = 0;
  351. rc = msm_audio_dma_buf_map(dma_buf, addr, len, false);
  352. if (rc) {
  353. pr_err("%s: failed to map DMA buf, err = %d\n",
  354. __func__, rc);
  355. goto err;
  356. }
  357. pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
  358. err:
  359. return rc;
  360. }
  361. static int msm_audio_ion_map_kernel(struct dma_buf *dma_buf,
  362. struct dma_buf_map *dma_vmap)
  363. {
  364. int rc = 0;
  365. struct msm_audio_alloc_data *alloc_data = NULL;
  366. rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  367. if (rc) {
  368. pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
  369. goto exit;
  370. }
  371. rc = dma_buf_vmap(dma_buf, dma_vmap);
  372. if (rc) {
  373. pr_err("%s: kernel mapping of dma_buf failed\n",
  374. __func__);
  375. goto exit;
  376. }
  377. /*
  378. * TBD: remove the below section once new API
  379. * for mapping kernel virtual address is available.
  380. */
  381. mutex_lock(&(msm_audio_ion_data.list_mutex));
  382. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  383. list) {
  384. if (alloc_data->dma_buf == dma_buf) {
  385. alloc_data->vmap = dma_vmap;
  386. break;
  387. }
  388. }
  389. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  390. exit:
  391. return rc;
  392. }
  393. static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
  394. {
  395. int rc = 0;
  396. struct dma_buf_map *dma_vmap = NULL
  397. struct msm_audio_alloc_data *alloc_data = NULL;
  398. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  399. /*
  400. * TBD: remove the below section once new API
  401. * for unmapping kernel virtual address is available.
  402. */
  403. mutex_lock(&(msm_audio_ion_data.list_mutex));
  404. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  405. list) {
  406. if (alloc_data->dma_buf == dma_buf) {
  407. dma_vmap = alloc_data->vmap;
  408. break;
  409. }
  410. }
  411. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  412. if (!dma_vmap) {
  413. dev_err(cb_dev,
  414. "%s: cannot find allocation for dma_buf %pK",
  415. __func__, dma_buf);
  416. rc = -EINVAL;
  417. goto err;
  418. }
  419. dma_buf_vunmap(dma_buf, dma_vmap);
  420. rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  421. if (rc) {
  422. dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
  423. __func__);
  424. goto err;
  425. }
  426. err:
  427. return rc;
  428. }
  429. static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
  430. size_t *plen, struct dma_buf_map *dma_vmap)
  431. {
  432. int rc = 0;
  433. rc = msm_audio_ion_get_phys(dma_buf, paddr, plen);
  434. if (rc) {
  435. pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
  436. __func__, rc);
  437. dma_buf_put(dma_buf);
  438. goto err;
  439. }
  440. rc = msm_audio_ion_map_kernel(dma_buf, dma_vmap);
  441. if (rc) {
  442. pr_err("%s: ION memory mapping for AUDIO failed, err:%d\n",
  443. __func__, rc);
  444. rc = -ENOMEM;
  445. msm_audio_dma_buf_unmap(dma_buf, false);
  446. goto err;
  447. }
  448. if (msm_audio_ion_data.smmu_enabled) {
  449. rc = msm_audio_ion_smmu_map(dma_buf, paddr, plen);
  450. if (rc) {
  451. pr_err("%s: failed to do smmu map, err = %d\n",
  452. __func__, rc);
  453. msm_audio_dma_buf_unmap(dma_buf, false);
  454. goto err;
  455. }
  456. }
  457. err:
  458. return rc;
  459. }
  460. /**
  461. * msm_audio_ion_alloc -
  462. * Allocs ION memory for given client name
  463. *
  464. * @dma_buf: dma_buf for the ION memory
  465. * @bufsz: buffer size
  466. * @paddr: Physical address to be assigned with allocated region
  467. * @plen: length of allocated region to be assigned
  468. * @dma_vmap: Virtual mapping vmap pointer to be assigned
  469. *
  470. * Returns 0 on success or error on failure
  471. */
  472. int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
  473. dma_addr_t *paddr, size_t *plen, struct dma_buf_map *dma_vmap)
  474. {
  475. int rc = -EINVAL;
  476. unsigned long err_ion_ptr = 0;
  477. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  478. pr_debug("%s:probe is not done, deferred\n", __func__);
  479. return -EPROBE_DEFER;
  480. }
  481. if (!dma_buf || !paddr || !bufsz || !plen) {
  482. pr_err("%s: Invalid params\n", __func__);
  483. return -EINVAL;
  484. }
  485. pr_debug("%s: audio heap is used\n", __func__);
  486. if (msm_audio_ion_data.smmu_enabled == true) {
  487. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
  488. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  489. if (IS_ERR((void *)(*dma_buf)))
  490. err_ion_ptr = PTR_ERR((int *)(*dma_buf));
  491. pr_debug("%s: ION alloc failed for audio heap err ptr=%ld, smmu_enabled=%d,"
  492. "trying system heap..\n",
  493. __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
  494. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
  495. }
  496. } else {
  497. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
  498. }
  499. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  500. if (IS_ERR((void *)(*dma_buf)))
  501. err_ion_ptr = PTR_ERR((int *)(*dma_buf));
  502. pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
  503. __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
  504. rc = -ENOMEM;
  505. goto err;
  506. }
  507. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, dma_vmap);
  508. if (rc) {
  509. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  510. goto err;
  511. }
  512. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  513. dma_vmap->vaddr, bufsz);
  514. memset(dma_vmap, 0, sizeof(struct dma_buf_map));
  515. err:
  516. return rc;
  517. }
  518. EXPORT_SYMBOL(msm_audio_ion_alloc);
/*
 * msm_audio_ion_phys_free - stub; hypervisor-assisted physical memory
 * reassignment is not supported in this VM build (see
 * msm_audio_is_hypervisor_supported()). Always succeeds.
 *
 * NOTE(review): 'handle' is passed by value, so the NULL assignment
 * below has no effect on the caller — kept to preserve behavior.
 */
int msm_audio_ion_phys_free(void *handle,
			    dma_addr_t *paddr,
			    size_t *pa_len,
			    u8 assign_type,
			    int id,
			    int key)
{
	handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_free);
/*
 * msm_audio_ion_phys_assign - stub counterpart of
 * msm_audio_ion_phys_free(); clears the caller's handle and reports
 * success without assigning any memory.
 */
int msm_audio_ion_phys_assign(void **handle, int fd,
			      dma_addr_t *paddr, size_t *pa_len,
			      u8 assign_type, int id)
{
	*handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_assign);
/*
 * msm_audio_is_hypervisor_supported - hypervisor-based memory
 * assignment is not available in this VM flavor of the driver.
 */
bool msm_audio_is_hypervisor_supported(void)
{
	return false;
}
EXPORT_SYMBOL(msm_audio_is_hypervisor_supported);
  542. /**
  543. * msm_audio_ion_import-
  544. * Import ION buffer with given file descriptor
  545. *
  546. * @dma_buf: dma_buf for the ION memory
  547. * @fd: file descriptor for the ION memory
  548. * @ionflag: flags associated with ION buffer
  549. * @bufsz: buffer size
  550. * @paddr: Physical address to be assigned with allocated region
  551. * @plen: length of allocated region to be assigned
  552. * @dma_vmap: Virtual mapping vmap pointer to be assigned
  553. *
  554. * Returns 0 on success or error on failure
  555. */
  556. int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
  557. unsigned long *ionflag, size_t bufsz,
  558. dma_addr_t *paddr, size_t *plen, struct dma_buf_map *dma_vmap)
  559. {
  560. int rc = 0;
  561. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  562. pr_debug("%s: probe is not done, deferred\n", __func__);
  563. return -EPROBE_DEFER;
  564. }
  565. if (!dma_buf || !paddr || !plen) {
  566. pr_err("%s: Invalid params\n", __func__);
  567. return -EINVAL;
  568. }
  569. /* bufsz should be 0 and fd shouldn't be 0 as of now */
  570. *dma_buf = dma_buf_get(fd);
  571. pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
  572. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  573. pr_err("%s: dma_buf_get failed\n", __func__);
  574. rc = -EINVAL;
  575. goto err;
  576. }
  577. if (ionflag != NULL) {
  578. rc = dma_buf_get_flags(*dma_buf, ionflag);
  579. if (rc) {
  580. pr_err("%s: could not get flags for the dma_buf\n",
  581. __func__);
  582. goto err_ion_flag;
  583. }
  584. }
  585. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, dma_vmap);
  586. if (rc) {
  587. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  588. goto err;
  589. }
  590. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  591. dma_vmap->vaddr, bufsz);
  592. return 0;
  593. err_ion_flag:
  594. dma_buf_put(*dma_buf);
  595. err:
  596. *dma_buf = NULL;
  597. return rc;
  598. }
  599. EXPORT_SYMBOL(msm_audio_ion_import);
  600. /**
  601. * msm_audio_ion_import_cma-
  602. * Import ION buffer with given file descriptor
  603. *
  604. * @dma_buf: dma_buf for the ION memory
  605. * @fd: file descriptor for the ION memory
  606. * @ionflag: flags associated with ION buffer
  607. * @bufsz: buffer size
  608. * @paddr: Physical address to be assigned with allocated region
  609. * @plen: length of allocated region to be assigned
  610. * @vaddr: virtual address to be assigned
  611. *
  612. * Returns 0 on success or error on failure
  613. */
  614. int msm_audio_ion_import_cma(struct dma_buf **dma_buf, int fd,
  615. unsigned long *ionflag, size_t bufsz,
  616. dma_addr_t *paddr, size_t *plen, void **vaddr)
  617. {
  618. int rc = 0;
  619. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  620. pr_debug("%s: probe is not done, deferred\n", __func__);
  621. return -EPROBE_DEFER;
  622. }
  623. if (!dma_buf || !paddr || !vaddr || !plen ||
  624. !msm_audio_ion_data.cb_cma_dev) {
  625. pr_err("%s: Invalid params\n", __func__);
  626. return -EINVAL;
  627. }
  628. /* bufsz should be 0 and fd shouldn't be 0 as of now */
  629. *dma_buf = dma_buf_get(fd);
  630. pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
  631. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  632. pr_err("%s: dma_buf_get failed\n", __func__);
  633. rc = -EINVAL;
  634. goto err;
  635. }
  636. if (ionflag != NULL) {
  637. rc = dma_buf_get_flags(*dma_buf, ionflag);
  638. if (rc) {
  639. pr_err("%s: could not get flags for the dma_buf\n",
  640. __func__);
  641. goto err_ion_flag;
  642. }
  643. }
  644. msm_audio_dma_buf_map(*dma_buf, paddr, plen, true);
  645. return 0;
  646. err_ion_flag:
  647. dma_buf_put(*dma_buf);
  648. err:
  649. *dma_buf = NULL;
  650. return rc;
  651. }
  652. EXPORT_SYMBOL(msm_audio_ion_import_cma);
  653. /**
  654. * msm_audio_ion_free -
  655. * fress ION memory for given client and handle
  656. *
  657. * @dma_buf: dma_buf for the ION memory
  658. *
  659. * Returns 0 on success or error on failure
  660. */
  661. int msm_audio_ion_free(struct dma_buf *dma_buf)
  662. {
  663. int ret = 0;
  664. if (!dma_buf) {
  665. pr_err("%s: dma_buf invalid\n", __func__);
  666. return -EINVAL;
  667. }
  668. ret = msm_audio_ion_unmap_kernel(dma_buf);
  669. if (ret)
  670. return ret;
  671. if (msm_audio_ion_data.smmu_enabled) {
  672. ret = msm_audio_ion_smmu_unmap(dma_buf);
  673. if (ret)
  674. pr_err("%s: smmu unmap failed with ret %d\n",
  675. __func__, ret);
  676. }
  677. msm_audio_dma_buf_unmap(dma_buf, false);
  678. return 0;
  679. }
  680. EXPORT_SYMBOL(msm_audio_ion_free);
  681. /**
  682. * msm_audio_ion_free_cma -
  683. * fress ION memory for given client and handle
  684. *
  685. * @dma_buf: dma_buf for the ION memory
  686. *
  687. * Returns 0 on success or error on failure
  688. */
  689. int msm_audio_ion_free_cma(struct dma_buf *dma_buf)
  690. {
  691. if (!dma_buf) {
  692. pr_err("%s: dma_buf invalid\n", __func__);
  693. return -EINVAL;
  694. }
  695. msm_audio_dma_buf_unmap(dma_buf, true);
  696. return 0;
  697. }
  698. EXPORT_SYMBOL(msm_audio_ion_free_cma);
  699. /**
  700. * msm_audio_ion_mmap -
  701. * Audio ION memory map
  702. *
  703. * @abuff: audio buf pointer
  704. * @vma: virtual mem area
  705. *
  706. * Returns 0 on success or error on failure
  707. */
  708. int msm_audio_ion_mmap(struct audio_buffer *abuff,
  709. struct vm_area_struct *vma)
  710. {
  711. struct msm_audio_alloc_data *alloc_data = NULL;
  712. struct sg_table *table;
  713. unsigned long addr = vma->vm_start;
  714. unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
  715. struct scatterlist *sg;
  716. unsigned int i;
  717. struct page *page;
  718. int ret = 0;
  719. bool found = false;
  720. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  721. mutex_lock(&(msm_audio_ion_data.list_mutex));
  722. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  723. list) {
  724. if (alloc_data->dma_buf == abuff->dma_buf) {
  725. found = true;
  726. table = alloc_data->table;
  727. break;
  728. }
  729. }
  730. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  731. if (!found) {
  732. dev_err(cb_dev,
  733. "%s: cannot find allocation, dma_buf %pK",
  734. __func__, abuff->dma_buf);
  735. return -EINVAL;
  736. }
  737. /* uncached */
  738. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  739. /* We need to check if a page is associated with this sg list because:
  740. * If the allocation came from a carveout we currently don't have
  741. * pages associated with carved out memory. This might change in the
  742. * future and we can remove this check and the else statement.
  743. */
  744. page = sg_page(table->sgl);
  745. if (page) {
  746. pr_debug("%s: page is NOT null\n", __func__);
  747. for_each_sg(table->sgl, sg, table->nents, i) {
  748. unsigned long remainder = vma->vm_end - addr;
  749. unsigned long len = sg->length;
  750. page = sg_page(sg);
  751. if (offset >= len) {
  752. offset -= len;
  753. continue;
  754. } else if (offset) {
  755. page += offset / PAGE_SIZE;
  756. len -= offset;
  757. offset = 0;
  758. }
  759. len = min(len, remainder);
  760. pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
  761. vma, (unsigned int)addr, len,
  762. (unsigned int)vma->vm_start,
  763. (unsigned int)vma->vm_end,
  764. (unsigned long)pgprot_val(vma->vm_page_prot));
  765. remap_pfn_range(vma, addr, page_to_pfn(page), len,
  766. vma->vm_page_prot);
  767. addr += len;
  768. if (addr >= vma->vm_end)
  769. return 0;
  770. }
  771. } else {
  772. pr_debug("%s: page is NULL\n", __func__);
  773. ret = -EINVAL;
  774. }
  775. return ret;
  776. }
  777. EXPORT_SYMBOL(msm_audio_ion_mmap);
/**
 * msm_audio_populate_upper_32_bits -
 * retrieve upper 32bits of 64bit address
 *
 * @pa: 64bit physical address
 *
 * Returns the high 32 bits of @pa (0 when dma_addr_t is 32-bit).
 */
u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
{
	return upper_32_bits(pa);
}
EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);
/* Two DT nodes bind here: the main ION node and the CMA variant; probe
 * distinguishes them via of_device_is_compatible(). */
static const struct of_device_id msm_audio_ion_dt_match[] = {
	{ .compatible = "qcom,msm-audio-ion" },
	{ .compatible = "qcom,msm-audio-ion-cma"},
	{ }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
  796. static int msm_audio_ion_probe(struct platform_device *pdev)
  797. {
  798. int rc = 0;
  799. const char *msm_audio_ion_dt = "qcom,smmu-enabled";
  800. bool smmu_enabled;
  801. struct device *dev = &pdev->dev;
  802. if (dev->of_node == NULL) {
  803. dev_err(dev,
  804. "%s: device tree is not found\n",
  805. __func__);
  806. msm_audio_ion_data.smmu_enabled = 0;
  807. return 0;
  808. }
  809. if (of_device_is_compatible(dev->of_node, "qcom,msm-audio-ion-cma")) {
  810. msm_audio_ion_data.cb_cma_dev = dev;
  811. return 0;
  812. }
  813. smmu_enabled = of_property_read_bool(dev->of_node,
  814. msm_audio_ion_dt);
  815. msm_audio_ion_data.smmu_enabled = smmu_enabled;
  816. if (!smmu_enabled) {
  817. dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
  818. goto exit;
  819. }
  820. rc = habmm_socket_open(&msm_audio_ion_hab_handle,
  821. HAB_MMID_CREATE(MM_AUD_3,
  822. MSM_AUDIO_SMMU_VM_HAB_MINOR_ID),
  823. 0xFFFFFFFF,
  824. HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
  825. if (rc) {
  826. dev_err(dev, "%s: habmm_socket_open failed %d\n",
  827. __func__, rc);
  828. return rc;
  829. }
  830. dev_info(dev, "%s: msm_audio_ion_hab_handle %x\n",
  831. __func__, msm_audio_ion_hab_handle);
  832. INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
  833. mutex_init(&(msm_audio_ion_data.list_mutex));
  834. exit:
  835. if (!rc)
  836. msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
  837. msm_audio_ion_data.cb_dev = dev;
  838. return rc;
  839. }
/*
 * msm_audio_ion_remove - platform remove; closes the HAB socket and
 * destroys the list mutex (paired with the SMMU-path init in probe),
 * then clears the driver state flags.
 */
static int msm_audio_ion_remove(struct platform_device *pdev)
{
	if (msm_audio_ion_data.smmu_enabled) {
		/* handle is only non-zero after a successful socket open */
		if (msm_audio_ion_hab_handle)
			habmm_socket_close(msm_audio_ion_hab_handle);
		mutex_destroy(&(msm_audio_ion_data.list_mutex));
	}
	msm_audio_ion_data.smmu_enabled = 0;
	msm_audio_ion_data.device_status = 0;
	return 0;
}
/* Platform driver matching both audio-ION DT compatibles above. */
static struct platform_driver msm_audio_ion_driver = {
	.driver = {
		.name = "msm-audio-ion",
		.owner = THIS_MODULE,
		.of_match_table = msm_audio_ion_dt_match,
	},
	.probe = msm_audio_ion_probe,
	.remove = msm_audio_ion_remove,
};
/* Module init: register the platform driver. */
int __init msm_audio_ion_init(void)
{
	return platform_driver_register(&msm_audio_ion_driver);
}
/* Module exit: unregister the platform driver. */
void msm_audio_ion_exit(void)
{
	platform_driver_unregister(&msm_audio_ion_driver);
}
  868. MODULE_DESCRIPTION("MSM Audio ION VM module");
  869. MODULE_LICENSE("GPL v2");