// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/ion_kernel.h>
#include <ipc/apr.h>
#include <asm/dma-iommu.h>
#include <dsp/msm_audio_ion.h>

#define MSM_AUDIO_ION_PROBED (1 << 0)

#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
        alloc_data->table->sgl->dma_address

#define MSM_AUDIO_ION_VA_START 0x10000000
#define MSM_AUDIO_ION_VA_LEN 0x0FFFFFFF

#define MSM_AUDIO_SMMU_SID_OFFSET 32
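
/*
 * Address layout used below: when the SMMU is enabled, the 64-bit address
 * returned to clients is a composite value. The lower 32 bits carry the
 * IOVA produced by the context-bank mapping, and the SMMU stream ID (SID)
 * is packed into the upper 32 bits:
 *
 *      addr = iova | (smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET);
 *
 * msm_audio_populate_upper_32_bits() later recovers the upper word for
 * 32-bit DMA address configurations and DSP command payloads.
 */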

struct msm_audio_ion_private {
        bool smmu_enabled;
        struct device *cb_dev;
        struct dma_iommu_mapping *mapping;
        u8 device_status;
        struct list_head alloc_list;
        struct mutex list_mutex;
        u64 smmu_sid_bits;
        u32 smmu_version;
        u32 iova_start_addr;
};

struct msm_audio_alloc_data {
        size_t len;
        void *vaddr;
        struct dma_buf *dma_buf;
        struct dma_buf_attachment *attach;
        struct sg_table *table;
        struct list_head list;
};

static struct msm_audio_ion_private msm_audio_ion_data = {0,};

static void msm_audio_ion_add_allocation(
        struct msm_audio_ion_private *msm_audio_ion_data,
        struct msm_audio_alloc_data *alloc_data)
{
        /*
         * Since these APIs can be invoked by multiple
         * clients, there is a need to make sure the list
         * of allocations is always protected.
         */
        mutex_lock(&(msm_audio_ion_data->list_mutex));
        list_add_tail(&(alloc_data->list),
                      &(msm_audio_ion_data->alloc_list));
        mutex_unlock(&(msm_audio_ion_data->list_mutex));
}

static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
                                 dma_addr_t *addr, size_t *len)
{
        struct msm_audio_alloc_data *alloc_data;
        struct device *cb_dev;
        unsigned long ionflag = 0;
        int rc = 0;

        cb_dev = msm_audio_ion_data.cb_dev;

        /* Data required per buffer mapping */
        alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
        if (!alloc_data)
                return -ENOMEM;

        alloc_data->dma_buf = dma_buf;
        alloc_data->len = dma_buf->size;
        *len = dma_buf->size;

        /* Attach the dma_buf to the context bank device */
        alloc_data->attach = dma_buf_attach(alloc_data->dma_buf, cb_dev);
        if (IS_ERR(alloc_data->attach)) {
                rc = PTR_ERR(alloc_data->attach);
                dev_err(cb_dev,
                        "%s: Fail to attach dma_buf to CB, rc = %d\n",
                        __func__, rc);
                goto free_alloc_data;
        }

        /* For uncached buffers, avoid cache maintenance */
        rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
        if (rc) {
                dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
                        __func__, rc);
                goto detach_dma_buf;
        }
        if (!(ionflag & ION_FLAG_CACHED))
                alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

        /*
         * Get the scatter-gather list.
         * There is no info on whether this is a write buffer or a
         * read buffer, hence the request is bi-directional to
         * accommodate both read and write mappings.
         */
        alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
                                                   DMA_BIDIRECTIONAL);
        if (IS_ERR(alloc_data->table)) {
                rc = PTR_ERR(alloc_data->table);
                dev_err(cb_dev,
                        "%s: Fail to map attachment, rc = %d\n",
                        __func__, rc);
                goto detach_dma_buf;
        }

        /* physical address from mapping */
        *addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);

        msm_audio_ion_add_allocation(&msm_audio_ion_data, alloc_data);
        return rc;

detach_dma_buf:
        dma_buf_detach(alloc_data->dma_buf, alloc_data->attach);
free_alloc_data:
        kfree(alloc_data);
        return rc;
}

static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf)
{
        int rc = 0;
        struct msm_audio_alloc_data *alloc_data = NULL;
        struct list_head *ptr, *next;
        struct device *cb_dev = msm_audio_ion_data.cb_dev;
        bool found = false;

        /*
         * Though list_for_each_safe is delete safe, the lock
         * should be explicitly acquired to avoid a race with
         * elements being added to the list.
         */
        mutex_lock(&(msm_audio_ion_data.list_mutex));
        list_for_each_safe(ptr, next,
                           &(msm_audio_ion_data.alloc_list)) {
                alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
                                        list);
                if (alloc_data->dma_buf == dma_buf) {
                        found = true;
                        dma_buf_unmap_attachment(alloc_data->attach,
                                                 alloc_data->table,
                                                 DMA_BIDIRECTIONAL);
                        dma_buf_detach(alloc_data->dma_buf,
                                       alloc_data->attach);
                        dma_buf_put(alloc_data->dma_buf);
                        list_del(&(alloc_data->list));
                        kfree(alloc_data);
                        break;
                }
        }
        mutex_unlock(&(msm_audio_ion_data.list_mutex));

        if (!found) {
                dev_err(cb_dev,
                        "%s: cannot find allocation, dma_buf %pK",
                        __func__, dma_buf);
                rc = -EINVAL;
        }
        return rc;
}

static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
                                  dma_addr_t *addr, size_t *len)
{
        int rc = 0;

        rc = msm_audio_dma_buf_map(dma_buf, addr, len);
        if (rc) {
                pr_err("%s: failed to map DMA buf, err = %d\n",
                       __func__, rc);
                goto err;
        }
        if (msm_audio_ion_data.smmu_enabled) {
                /* Append the SMMU SID information to the IOVA address */
                *addr |= msm_audio_ion_data.smmu_sid_bits;
        }

        pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
err:
        return rc;
}

int msm_audio_ion_get_smmu_info(struct device **cb_dev,
                                u64 *smmu_sid)
{
        if (!cb_dev || !smmu_sid) {
                pr_err("%s: Invalid params\n", __func__);
                return -EINVAL;
        }

        if (!msm_audio_ion_data.cb_dev ||
            !msm_audio_ion_data.smmu_sid_bits) {
                pr_err("%s: Params not initialized\n", __func__);
                return -EINVAL;
        }

        *cb_dev = msm_audio_ion_data.cb_dev;
        *smmu_sid = msm_audio_ion_data.smmu_sid_bits;

        return 0;
}

static void *msm_audio_ion_map_kernel(struct dma_buf *dma_buf)
{
        int rc = 0;
        void *addr = NULL;
        struct msm_audio_alloc_data *alloc_data = NULL;

        rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
        if (rc) {
                pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
                goto exit;
        }

        addr = dma_buf_vmap(dma_buf);
        if (!addr) {
                pr_err("%s: kernel mapping of dma_buf failed\n", __func__);
                goto exit;
        }

        /*
         * TBD: remove the below section once a new API
         * for mapping kernel virtual addresses is available.
         */
        mutex_lock(&(msm_audio_ion_data.list_mutex));
        list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
                            list) {
                if (alloc_data->dma_buf == dma_buf) {
                        alloc_data->vaddr = addr;
                        break;
                }
        }
        mutex_unlock(&(msm_audio_ion_data.list_mutex));

exit:
        return addr;
}

static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
{
        int rc = 0;
        void *vaddr = NULL;
        struct msm_audio_alloc_data *alloc_data = NULL;
        struct device *cb_dev = msm_audio_ion_data.cb_dev;

        /*
         * TBD: remove the below section once a new API
         * for unmapping kernel virtual addresses is available.
         */
        mutex_lock(&(msm_audio_ion_data.list_mutex));
        list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
                            list) {
                if (alloc_data->dma_buf == dma_buf) {
                        vaddr = alloc_data->vaddr;
                        break;
                }
        }
        mutex_unlock(&(msm_audio_ion_data.list_mutex));

        if (!vaddr) {
                dev_err(cb_dev,
                        "%s: cannot find allocation for dma_buf %pK",
                        __func__, dma_buf);
                rc = -EINVAL;
                goto err;
        }

        dma_buf_vunmap(dma_buf, vaddr);

        rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
        if (rc) {
                dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
                        __func__);
                goto err;
        }

err:
        return rc;
}

static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
                                 size_t *plen, void **vaddr)
{
        int rc = 0;

        rc = msm_audio_ion_get_phys(dma_buf, paddr, plen);
        if (rc) {
                pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
                       __func__, rc);
                goto err;
        }

        *vaddr = msm_audio_ion_map_kernel(dma_buf);
        if (IS_ERR_OR_NULL(*vaddr)) {
                pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
                rc = -ENOMEM;
                goto err;
        }

err:
        return rc;
}

static u32 msm_audio_ion_get_smmu_sid_mode32(void)
{
        if (msm_audio_ion_data.smmu_enabled)
                return upper_32_bits(msm_audio_ion_data.smmu_sid_bits);
        else
                return 0;
}

/**
 * msm_audio_ion_alloc -
 *        Allocates ION memory for the given client
 *
 * @dma_buf: dma_buf for the ION memory
 * @bufsz: buffer size
 * @paddr: updated with the physical address of the allocated region
 * @plen: updated with the length of the allocated region
 * @vaddr: updated with the kernel virtual address
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
                        dma_addr_t *paddr, size_t *plen, void **vaddr)
{
        int rc = -EINVAL;
        unsigned long err_ion_ptr = 0;

        if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
                pr_debug("%s: probe is not done, deferred\n", __func__);
                return -EPROBE_DEFER;
        }
        if (!dma_buf || !paddr || !vaddr || !bufsz || !plen) {
                pr_err("%s: Invalid params\n", __func__);
                return -EINVAL;
        }

        if (msm_audio_ion_data.smmu_enabled == true) {
                pr_debug("%s: system heap is used\n", __func__);
                *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
        } else {
                pr_debug("%s: audio heap is used\n", __func__);
                *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
        }
        if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
                if (IS_ERR((void *)(*dma_buf)))
                        err_ion_ptr = PTR_ERR((void *)(*dma_buf));
                pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
                       __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
                rc = -ENOMEM;
                goto err;
        }

        rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
        if (rc) {
                pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
                goto err_dma_buf;
        }
        pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
                 *vaddr, bufsz);

        memset(*vaddr, 0, bufsz);

        return rc;

err_dma_buf:
        dma_buf_put(*dma_buf);
err:
        return rc;
}
EXPORT_SYMBOL(msm_audio_ion_alloc);
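
/*
 * Example usage (illustrative sketch only, not part of this driver): a
 * typical client allocates a DSP-visible buffer, uses the returned
 * physical and kernel virtual addresses, then releases the buffer with
 * msm_audio_ion_free(). Variable names are hypothetical.
 *
 *      struct dma_buf *dbuf;
 *      dma_addr_t phys;
 *      size_t len;
 *      void *kvaddr;
 *      int rc;
 *
 *      rc = msm_audio_ion_alloc(&dbuf, SZ_4K, &phys, &len, &kvaddr);
 *      if (rc)
 *              return rc;
 *      ... program the DSP with "phys", fill/read data via "kvaddr" ...
 *      msm_audio_ion_free(dbuf);
 */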

/**
 * msm_audio_ion_import -
 *        Imports an ION buffer with the given file descriptor
 *
 * @dma_buf: dma_buf for the ION memory
 * @fd: file descriptor for the ION memory
 * @ionflag: flags associated with the ION buffer
 * @bufsz: buffer size
 * @paddr: updated with the physical address of the mapped region
 * @plen: updated with the length of the mapped region
 * @vaddr: updated with the kernel virtual address
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
                         unsigned long *ionflag, size_t bufsz,
                         dma_addr_t *paddr, size_t *plen, void **vaddr)
{
        int rc = 0;

        if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
                pr_debug("%s: probe is not done, deferred\n", __func__);
                return -EPROBE_DEFER;
        }

        if (!dma_buf || !paddr || !vaddr || !plen) {
                pr_err("%s: Invalid params\n", __func__);
                return -EINVAL;
        }

        /* bufsz should be 0 and fd shouldn't be 0 as of now */
        *dma_buf = dma_buf_get(fd);
        pr_debug("%s: dma_buf = %pK, fd = %d\n", __func__, *dma_buf, fd);
        if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
                pr_err("%s: dma_buf_get failed\n", __func__);
                rc = -EINVAL;
                goto err;
        }

        if (ionflag != NULL) {
                rc = dma_buf_get_flags(*dma_buf, ionflag);
                if (rc) {
                        pr_err("%s: could not get flags for the dma_buf\n",
                               __func__);
                        goto err_ion_flag;
                }
        }

        rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
        if (rc) {
                pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
                goto err_ion_flag;
        }
        pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
                 *vaddr, bufsz);

        return 0;

err_ion_flag:
        dma_buf_put(*dma_buf);
err:
        *dma_buf = NULL;
        return rc;
}
EXPORT_SYMBOL(msm_audio_ion_import);
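
/*
 * Example usage (illustrative sketch only): mapping a buffer that was
 * allocated in userspace and shared as an ION/dma-buf file descriptor,
 * e.g. received through an ioctl argument "ion_fd" (hypothetical name):
 *
 *      struct dma_buf *dbuf;
 *      unsigned long flags;
 *      dma_addr_t phys;
 *      size_t len;
 *      void *kvaddr;
 *      int rc;
 *
 *      rc = msm_audio_ion_import(&dbuf, ion_fd, &flags, 0,
 *                                &phys, &len, &kvaddr);
 *      if (rc)
 *              return rc;
 *      ...
 *      msm_audio_ion_free(dbuf);
 */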

/**
 * msm_audio_ion_free -
 *        Frees the ION memory mapped for the given dma_buf
 *
 * @dma_buf: dma_buf for the ION memory
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_free(struct dma_buf *dma_buf)
{
        int ret = 0;

        if (!dma_buf) {
                pr_err("%s: dma_buf invalid\n", __func__);
                return -EINVAL;
        }

        ret = msm_audio_ion_unmap_kernel(dma_buf);
        if (ret)
                return ret;

        msm_audio_dma_buf_unmap(dma_buf);

        return 0;
}
EXPORT_SYMBOL(msm_audio_ion_free);

/**
 * msm_audio_ion_mmap -
 *        Maps an Audio ION buffer into a client's address space
 *
 * @abuff: audio buffer pointer
 * @vma: virtual memory area of the client mapping
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_mmap(struct audio_buffer *abuff,
                       struct vm_area_struct *vma)
{
        struct msm_audio_alloc_data *alloc_data = NULL;
        struct sg_table *table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        unsigned int i;
        struct page *page;
        int ret = 0;
        bool found = false;
        struct device *cb_dev = msm_audio_ion_data.cb_dev;

        mutex_lock(&(msm_audio_ion_data.list_mutex));
        list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
                            list) {
                if (alloc_data->dma_buf == abuff->dma_buf) {
                        found = true;
                        table = alloc_data->table;
                        break;
                }
        }
        mutex_unlock(&(msm_audio_ion_data.list_mutex));

        if (!found) {
                dev_err(cb_dev,
                        "%s: cannot find allocation, dma_buf %pK",
                        __func__, abuff->dma_buf);
                return -EINVAL;
        }

        /* uncached */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        /*
         * We need to check if a page is associated with this sg list
         * because if the allocation came from a carveout we currently
         * don't have pages associated with carved out memory. This might
         * change in the future and we can remove this check and the else
         * statement.
         */
        page = sg_page(table->sgl);
        if (page) {
                pr_debug("%s: page is NOT null\n", __func__);
                for_each_sg(table->sgl, sg, table->nents, i) {
                        unsigned long remainder = vma->vm_end - addr;
                        unsigned long len = sg->length;

                        page = sg_page(sg);

                        if (offset >= len) {
                                offset -= len;
                                continue;
                        } else if (offset) {
                                page += offset / PAGE_SIZE;
                                len -= offset;
                                offset = 0;
                        }
                        len = min(len, remainder);
                        pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
                                 vma, (unsigned int)addr, len,
                                 (unsigned int)vma->vm_start,
                                 (unsigned int)vma->vm_end,
                                 (unsigned long)pgprot_val(vma->vm_page_prot));
                        remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                        vma->vm_page_prot);
                        addr += len;
                        if (addr >= vma->vm_end)
                                return 0;
                }
        } else {
                pr_debug("%s: page is NULL\n", __func__);
                ret = -EINVAL;
        }
        return ret;
}
EXPORT_SYMBOL(msm_audio_ion_mmap);
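
/*
 * Example usage (illustrative sketch only): an audio driver's mmap file
 * operation can hand the VMA straight to this helper once the
 * audio_buffer has been allocated or imported. The private_data
 * convention below is hypothetical.
 *
 *      static int example_audio_mmap(struct file *file,
 *                                    struct vm_area_struct *vma)
 *      {
 *              struct audio_buffer *ab = file->private_data;
 *
 *              return msm_audio_ion_mmap(ab, vma);
 *      }
 */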

/**
 * msm_audio_ion_cache_operations -
 *        Cache operations on cached Audio ION buffers
 *
 * @abuff: audio buffer pointer
 * @cache_op: cache operation to be performed
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
{
        unsigned long ionflag = 0;
        int rc = 0;

        if (!abuff) {
                pr_err("%s: Invalid params: %pK\n", __func__, abuff);
                return -EINVAL;
        }
        rc = dma_buf_get_flags(abuff->dma_buf, &ionflag);
        if (rc) {
                pr_err("%s: dma_buf_get_flags failed: %d\n", __func__, rc);
                goto cache_op_failed;
        }

        /* Has to be CACHED */
        if (ionflag & ION_FLAG_CACHED) {
                /* MSM_AUDIO_ION_INV_CACHES or MSM_AUDIO_ION_CLEAN_CACHES */
                switch (cache_op) {
                case MSM_AUDIO_ION_INV_CACHES:
                case MSM_AUDIO_ION_CLEAN_CACHES:
                        dma_buf_begin_cpu_access(abuff->dma_buf,
                                                 DMA_BIDIRECTIONAL);
                        dma_buf_end_cpu_access(abuff->dma_buf,
                                               DMA_BIDIRECTIONAL);
                        break;
                default:
                        pr_err("%s: Invalid cache operation %d\n",
                               __func__, cache_op);
                }
        } else {
                pr_err("%s: Cache ops called on uncached buffer: %pK\n",
                       __func__, abuff->dma_buf);
                rc = -EINVAL;
        }

cache_op_failed:
        return rc;
}
EXPORT_SYMBOL(msm_audio_ion_cache_operations);
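
/*
 * Example usage (illustrative sketch only): for a buffer allocated with
 * ION_FLAG_CACHED, a client "ab" (struct audio_buffer *, hypothetical)
 * cleans caches after the CPU fills data and before handing it to the
 * DSP, and invalidates before reading data the DSP produced:
 *
 *      rc = msm_audio_ion_cache_operations(ab, MSM_AUDIO_ION_CLEAN_CACHES);
 *      ...
 *      rc = msm_audio_ion_cache_operations(ab, MSM_AUDIO_ION_INV_CACHES);
 */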

/**
 * msm_audio_populate_upper_32_bits -
 *        Retrieves the upper 32 bits of a 64-bit address
 *
 * @pa: 64-bit physical address
 */
u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
{
        if (sizeof(dma_addr_t) == sizeof(u32))
                return msm_audio_ion_get_smmu_sid_mode32();
        else
                return upper_32_bits(pa);
}
EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);
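
/*
 * Example usage (illustrative sketch only): splitting a mapped address
 * into the two 32-bit words commonly carried by DSP command payloads.
 * The field names below are hypothetical.
 *
 *      cmd.buf_addr_lsw = lower_32_bits(paddr);
 *      cmd.buf_addr_msw = msm_audio_populate_upper_32_bits(paddr);
 */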

static int msm_audio_smmu_init(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int ret;

        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           msm_audio_ion_data.iova_start_addr,
                                           MSM_AUDIO_ION_VA_LEN);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        ret = arm_iommu_attach_device(dev, mapping);
        if (ret) {
                dev_err(dev, "%s: Attach failed, err = %d\n",
                        __func__, ret);
                goto fail_attach;
        }

        msm_audio_ion_data.mapping = mapping;
        INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
        mutex_init(&(msm_audio_ion_data.list_mutex));

        return 0;

fail_attach:
        arm_iommu_release_mapping(mapping);
        return ret;
}

static const struct of_device_id msm_audio_ion_dt_match[] = {
        { .compatible = "qcom,msm-audio-ion" },
        { }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);

static int msm_audio_ion_probe(struct platform_device *pdev)
{
        int rc = 0;
        u64 smmu_sid = 0;
        u64 smmu_sid_mask = 0;
        const char *msm_audio_ion_dt = "qcom,smmu-enabled";
        const char *msm_audio_ion_smmu = "qcom,smmu-version";
        const char *msm_audio_ion_iova_start_addr = "qcom,iova-start-addr";
        const char *msm_audio_ion_smmu_sid_mask = "qcom,smmu-sid-mask";
        bool smmu_enabled;
        enum apr_subsys_state q6_state;
        struct device *dev = &pdev->dev;
        struct of_phandle_args iommuspec;

        if (dev->of_node == NULL) {
                dev_err(dev,
                        "%s: device tree is not found\n",
                        __func__);
                msm_audio_ion_data.smmu_enabled = 0;
                return 0;
        }

        smmu_enabled = of_property_read_bool(dev->of_node,
                                             msm_audio_ion_dt);
        msm_audio_ion_data.smmu_enabled = smmu_enabled;

        if (!smmu_enabled) {
                dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
                goto exit;
        }

        q6_state = apr_get_q6_state();
        if (q6_state == APR_SUBSYS_DOWN) {
                dev_dbg(dev,
                        "deferring %s, adsp_state %d\n",
                        __func__, q6_state);
                return -EPROBE_DEFER;
        }
        dev_dbg(dev, "%s: adsp is ready\n", __func__);

        rc = of_property_read_u32(dev->of_node,
                                  msm_audio_ion_smmu,
                                  &msm_audio_ion_data.smmu_version);
        if (rc) {
                dev_err(dev,
                        "%s: qcom,smmu-version missing in DT node\n",
                        __func__);
                return rc;
        }
        dev_dbg(dev, "%s: SMMU is Enabled. SMMU version is (%d)\n",
                __func__, msm_audio_ion_data.smmu_version);

        rc = of_property_read_u32(dev->of_node,
                                  msm_audio_ion_iova_start_addr,
                                  &msm_audio_ion_data.iova_start_addr);
        if (rc) {
                dev_dbg(dev,
                        "%s: qcom,iova-start-addr missing in DT node, using default\n",
                        __func__);
                msm_audio_ion_data.iova_start_addr = MSM_AUDIO_ION_VA_START;
        } else {
                dev_dbg(dev, "%s: IOVA start addr: 0x%x\n",
                        __func__, msm_audio_ion_data.iova_start_addr);
        }

        /* Get SMMU SID information from the device tree */
        rc = of_property_read_u64(dev->of_node,
                                  msm_audio_ion_smmu_sid_mask,
                                  &smmu_sid_mask);
        if (rc) {
                dev_err(dev,
                        "%s: qcom,smmu-sid-mask missing in DT node, using default\n",
                        __func__);
                smmu_sid_mask = 0xFFFFFFFFFFFFFFFF;
        }

        rc = of_parse_phandle_with_args(dev->of_node, "iommus",
                                        "#iommu-cells", 0, &iommuspec);
        if (rc)
                dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
                        __func__, rc);
        else
                smmu_sid = (iommuspec.args[0] & smmu_sid_mask);

        msm_audio_ion_data.smmu_sid_bits =
                smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;

        if (msm_audio_ion_data.smmu_version == 0x2) {
                rc = msm_audio_smmu_init(dev);
        } else {
                dev_err(dev, "%s: smmu version invalid %d\n",
                        __func__, msm_audio_ion_data.smmu_version);
                rc = -EINVAL;
        }
        if (rc)
                dev_err(dev, "%s: smmu init failed, err = %d\n",
                        __func__, rc);

exit:
        if (!rc)
                msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;

        msm_audio_ion_data.cb_dev = dev;

        return rc;
}

static int msm_audio_ion_remove(struct platform_device *pdev)
{
        struct dma_iommu_mapping *mapping;
        struct device *audio_cb_dev;

        mapping = msm_audio_ion_data.mapping;
        audio_cb_dev = msm_audio_ion_data.cb_dev;

        if (audio_cb_dev && mapping) {
                arm_iommu_detach_device(audio_cb_dev);
                arm_iommu_release_mapping(mapping);
        }

        msm_audio_ion_data.smmu_enabled = 0;
        msm_audio_ion_data.device_status = 0;
        return 0;
}

static struct platform_driver msm_audio_ion_driver = {
        .driver = {
                .name = "msm-audio-ion",
                .owner = THIS_MODULE,
                .of_match_table = msm_audio_ion_dt_match,
        },
        .probe = msm_audio_ion_probe,
        .remove = msm_audio_ion_remove,
};

int __init msm_audio_ion_init(void)
{
        return platform_driver_register(&msm_audio_ion_driver);
}

void msm_audio_ion_exit(void)
{
        platform_driver_unregister(&msm_audio_ion_driver);
}

MODULE_DESCRIPTION("MSM Audio ION module");
MODULE_LICENSE("GPL v2");