msm_audio_ion.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/init.h>
  6. #include <linux/kernel.h>
  7. #include <linux/module.h>
  8. #include <linux/err.h>
  9. #include <linux/delay.h>
  10. #include <linux/slab.h>
  11. #include <linux/mutex.h>
  12. #include <linux/list.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dma-buf.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/of_device.h>
  17. #include <linux/export.h>
  18. #include <linux/ion_kernel.h>
  19. #include <ipc/apr.h>
  20. #include <dsp/msm_audio_ion.h>
/* Set in device_status once probe has completed successfully */
#define MSM_AUDIO_ION_PROBED (1 << 0)

/* Device (IOVA) address of the first sg entry of a mapped buffer */
#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
	alloc_data->table->sgl->dma_address

/* The SMMU SID is carried in the upper 32 bits of the 64-bit IOVA */
#define MSM_AUDIO_SMMU_SID_OFFSET 32

/* Driver-global state, populated at probe time */
struct msm_audio_ion_private {
	bool smmu_enabled;		/* from "qcom,smmu-enabled" DT prop */
	struct device *cb_dev;		/* context-bank device used for DMA mapping */
	u8 device_status;		/* MSM_AUDIO_ION_PROBED flag */
	struct list_head alloc_list;	/* live msm_audio_alloc_data records */
	struct mutex list_mutex;	/* protects alloc_list */
	u64 smmu_sid_bits;		/* SID << MSM_AUDIO_SMMU_SID_OFFSET */
	u32 smmu_version;		/* from "qcom,smmu-version" DT prop */
};

/* Book-keeping for one mapped dma_buf */
struct msm_audio_alloc_data {
	size_t len;			/* buffer size in bytes */
	void *vaddr;			/* kernel vmap address, NULL until mapped */
	struct dma_buf *dma_buf;	/* the buffer this record tracks */
	struct dma_buf_attachment *attach;
	struct sg_table *table;		/* sg list from dma_buf_map_attachment */
	struct list_head list;		/* linked on alloc_list */
};

static struct msm_audio_ion_private msm_audio_ion_data = {0,};
  43. static void msm_audio_ion_add_allocation(
  44. struct msm_audio_ion_private *msm_audio_ion_data,
  45. struct msm_audio_alloc_data *alloc_data)
  46. {
  47. /*
  48. * Since these APIs can be invoked by multiple
  49. * clients, there is need to make sure the list
  50. * of allocations is always protected
  51. */
  52. mutex_lock(&(msm_audio_ion_data->list_mutex));
  53. list_add_tail(&(alloc_data->list),
  54. &(msm_audio_ion_data->alloc_list));
  55. mutex_unlock(&(msm_audio_ion_data->list_mutex));
  56. }
  57. static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
  58. dma_addr_t *addr, size_t *len)
  59. {
  60. struct msm_audio_alloc_data *alloc_data;
  61. struct device *cb_dev;
  62. unsigned long ionflag = 0;
  63. int rc = 0;
  64. cb_dev = msm_audio_ion_data.cb_dev;
  65. /* Data required per buffer mapping */
  66. alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
  67. if (!alloc_data)
  68. return -ENOMEM;
  69. alloc_data->dma_buf = dma_buf;
  70. alloc_data->len = dma_buf->size;
  71. *len = dma_buf->size;
  72. /* Attach the dma_buf to context bank device */
  73. alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
  74. cb_dev);
  75. if (IS_ERR(alloc_data->attach)) {
  76. rc = PTR_ERR(alloc_data->attach);
  77. dev_err(cb_dev,
  78. "%s: Fail to attach dma_buf to CB, rc = %d\n",
  79. __func__, rc);
  80. goto free_alloc_data;
  81. }
  82. /* For uncached buffers, avoid cache maintanance */
  83. rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
  84. if (rc) {
  85. dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
  86. __func__, rc);
  87. goto detach_dma_buf;
  88. }
  89. if (!(ionflag & ION_FLAG_CACHED))
  90. alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
  91. /*
  92. * Get the scatter-gather list.
  93. * There is no info as this is a write buffer or
  94. * read buffer, hence the request is bi-directional
  95. * to accommodate both read and write mappings.
  96. */
  97. alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
  98. DMA_BIDIRECTIONAL);
  99. if (IS_ERR(alloc_data->table)) {
  100. rc = PTR_ERR(alloc_data->table);
  101. dev_err(cb_dev,
  102. "%s: Fail to map attachment, rc = %d\n",
  103. __func__, rc);
  104. goto detach_dma_buf;
  105. }
  106. /* physical address from mapping */
  107. *addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);
  108. msm_audio_ion_add_allocation(&msm_audio_ion_data,
  109. alloc_data);
  110. return rc;
  111. detach_dma_buf:
  112. dma_buf_detach(alloc_data->dma_buf,
  113. alloc_data->attach);
  114. free_alloc_data:
  115. kfree(alloc_data);
  116. return rc;
  117. }
  118. static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf)
  119. {
  120. int rc = 0;
  121. struct msm_audio_alloc_data *alloc_data = NULL;
  122. struct list_head *ptr, *next;
  123. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  124. bool found = false;
  125. /*
  126. * Though list_for_each_safe is delete safe, lock
  127. * should be explicitly acquired to avoid race condition
  128. * on adding elements to the list.
  129. */
  130. mutex_lock(&(msm_audio_ion_data.list_mutex));
  131. list_for_each_safe(ptr, next,
  132. &(msm_audio_ion_data.alloc_list)) {
  133. alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
  134. list);
  135. if (alloc_data->dma_buf == dma_buf) {
  136. found = true;
  137. dma_buf_unmap_attachment(alloc_data->attach,
  138. alloc_data->table,
  139. DMA_BIDIRECTIONAL);
  140. dma_buf_detach(alloc_data->dma_buf,
  141. alloc_data->attach);
  142. dma_buf_put(alloc_data->dma_buf);
  143. list_del(&(alloc_data->list));
  144. kfree(alloc_data);
  145. break;
  146. }
  147. }
  148. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  149. if (!found) {
  150. dev_err(cb_dev,
  151. "%s: cannot find allocation, dma_buf %pK",
  152. __func__, dma_buf);
  153. rc = -EINVAL;
  154. }
  155. return rc;
  156. }
  157. static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
  158. dma_addr_t *addr, size_t *len)
  159. {
  160. int rc = 0;
  161. rc = msm_audio_dma_buf_map(dma_buf, addr, len);
  162. if (rc) {
  163. pr_err("%s: failed to map DMA buf, err = %d\n",
  164. __func__, rc);
  165. goto err;
  166. }
  167. if (msm_audio_ion_data.smmu_enabled) {
  168. /* Append the SMMU SID information to the IOVA address */
  169. *addr |= msm_audio_ion_data.smmu_sid_bits;
  170. }
  171. pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
  172. err:
  173. return rc;
  174. }
  175. int msm_audio_ion_get_smmu_info(struct device **cb_dev,
  176. u64 *smmu_sid)
  177. {
  178. if (!cb_dev || !smmu_sid) {
  179. pr_err("%s: Invalid params\n",
  180. __func__);
  181. return -EINVAL;
  182. }
  183. if (!msm_audio_ion_data.cb_dev ||
  184. !msm_audio_ion_data.smmu_sid_bits) {
  185. pr_err("%s: Params not initialized\n",
  186. __func__);
  187. return -EINVAL;
  188. }
  189. *cb_dev = msm_audio_ion_data.cb_dev;
  190. *smmu_sid = msm_audio_ion_data.smmu_sid_bits;
  191. return 0;
  192. }
  193. static void *msm_audio_ion_map_kernel(struct dma_buf *dma_buf)
  194. {
  195. int rc = 0;
  196. void *addr = NULL;
  197. struct msm_audio_alloc_data *alloc_data = NULL;
  198. rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  199. if (rc) {
  200. pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
  201. goto exit;
  202. }
  203. addr = dma_buf_vmap(dma_buf);
  204. if (!addr) {
  205. pr_err("%s: kernel mapping of dma_buf failed\n",
  206. __func__);
  207. goto exit;
  208. }
  209. /*
  210. * TBD: remove the below section once new API
  211. * for mapping kernel virtual address is available.
  212. */
  213. mutex_lock(&(msm_audio_ion_data.list_mutex));
  214. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  215. list) {
  216. if (alloc_data->dma_buf == dma_buf) {
  217. alloc_data->vaddr = addr;
  218. break;
  219. }
  220. }
  221. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  222. exit:
  223. return addr;
  224. }
  225. static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
  226. {
  227. int rc = 0;
  228. void *vaddr = NULL;
  229. struct msm_audio_alloc_data *alloc_data = NULL;
  230. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  231. /*
  232. * TBD: remove the below section once new API
  233. * for unmapping kernel virtual address is available.
  234. */
  235. mutex_lock(&(msm_audio_ion_data.list_mutex));
  236. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  237. list) {
  238. if (alloc_data->dma_buf == dma_buf) {
  239. vaddr = alloc_data->vaddr;
  240. break;
  241. }
  242. }
  243. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  244. if (!vaddr) {
  245. dev_err(cb_dev,
  246. "%s: cannot find allocation for dma_buf %pK",
  247. __func__, dma_buf);
  248. rc = -EINVAL;
  249. goto err;
  250. }
  251. dma_buf_vunmap(dma_buf, vaddr);
  252. rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
  253. if (rc) {
  254. dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
  255. __func__);
  256. goto err;
  257. }
  258. err:
  259. return rc;
  260. }
  261. static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
  262. size_t *plen, void **vaddr)
  263. {
  264. int rc = 0;
  265. rc = msm_audio_ion_get_phys(dma_buf, paddr, plen);
  266. if (rc) {
  267. pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
  268. __func__, rc);
  269. goto err;
  270. }
  271. *vaddr = msm_audio_ion_map_kernel(dma_buf);
  272. if (IS_ERR_OR_NULL(*vaddr)) {
  273. pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
  274. rc = -ENOMEM;
  275. goto err;
  276. }
  277. err:
  278. return rc;
  279. }
  280. static u32 msm_audio_ion_get_smmu_sid_mode32(void)
  281. {
  282. if (msm_audio_ion_data.smmu_enabled)
  283. return upper_32_bits(msm_audio_ion_data.smmu_sid_bits);
  284. else
  285. return 0;
  286. }
  287. /**
  288. * msm_audio_ion_alloc -
  289. * Allocs ION memory for given client name
  290. *
  291. * @dma_buf: dma_buf for the ION memory
  292. * @bufsz: buffer size
  293. * @paddr: Physical address to be assigned with allocated region
  294. * @plen: length of allocated region to be assigned
  295. * vaddr: virtual address to be assigned
  296. *
  297. * Returns 0 on success or error on failure
  298. */
  299. int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
  300. dma_addr_t *paddr, size_t *plen, void **vaddr)
  301. {
  302. int rc = -EINVAL;
  303. unsigned long err_ion_ptr = 0;
  304. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  305. pr_debug("%s:probe is not done, deferred\n", __func__);
  306. return -EPROBE_DEFER;
  307. }
  308. if (!dma_buf || !paddr || !vaddr || !bufsz || !plen) {
  309. pr_err("%s: Invalid params\n", __func__);
  310. return -EINVAL;
  311. }
  312. if (msm_audio_ion_data.smmu_enabled == true) {
  313. pr_debug("%s: system heap is used\n", __func__);
  314. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
  315. } else {
  316. pr_debug("%s: audio heap is used\n", __func__);
  317. *dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
  318. }
  319. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  320. if (IS_ERR((void *)(*dma_buf)))
  321. err_ion_ptr = PTR_ERR((int *)(*dma_buf));
  322. pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
  323. __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
  324. rc = -ENOMEM;
  325. goto err;
  326. }
  327. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
  328. if (rc) {
  329. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  330. goto err_dma_buf;
  331. }
  332. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  333. *vaddr, bufsz);
  334. memset(*vaddr, 0, bufsz);
  335. return rc;
  336. err_dma_buf:
  337. dma_buf_put(*dma_buf);
  338. err:
  339. return rc;
  340. }
  341. EXPORT_SYMBOL(msm_audio_ion_alloc);
  342. /**
  343. * msm_audio_ion_dma_map -
  344. * Memory maps for a given DMA buffer
  345. *
  346. * @phys_addr: Physical address of DMA buffer to be mapped
  347. * @iova_base: IOVA address of memory mapped DMA buffer
  348. * @size: buffer size
  349. * @dir: DMA direction
  350. * Returns 0 on success or error on failure
  351. */
  352. int msm_audio_ion_dma_map(dma_addr_t *phys_addr, dma_addr_t *iova_base,
  353. u32 size, enum dma_data_direction dir)
  354. {
  355. dma_addr_t iova;
  356. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  357. if (!phys_addr || !iova_base || !size)
  358. return -EINVAL;
  359. iova = dma_map_resource(cb_dev, *phys_addr, size,
  360. dir, 0);
  361. if (dma_mapping_error(cb_dev, iova)) {
  362. pr_err("%s: dma_mapping_error\n", __func__);
  363. return -EIO;
  364. }
  365. pr_debug("%s: dma_mapping_success iova:0x%lx\n", __func__,
  366. (unsigned long)iova);
  367. if (msm_audio_ion_data.smmu_enabled)
  368. /* Append the SMMU SID information to the IOVA address */
  369. iova |= msm_audio_ion_data.smmu_sid_bits;
  370. *iova_base = iova;
  371. return 0;
  372. }
  373. EXPORT_SYMBOL(msm_audio_ion_dma_map);
  374. /**
  375. * msm_audio_ion_import-
  376. * Import ION buffer with given file descriptor
  377. *
  378. * @dma_buf: dma_buf for the ION memory
  379. * @fd: file descriptor for the ION memory
  380. * @ionflag: flags associated with ION buffer
  381. * @bufsz: buffer size
  382. * @paddr: Physical address to be assigned with allocated region
  383. * @plen: length of allocated region to be assigned
  384. * vaddr: virtual address to be assigned
  385. *
  386. * Returns 0 on success or error on failure
  387. */
  388. int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
  389. unsigned long *ionflag, size_t bufsz,
  390. dma_addr_t *paddr, size_t *plen, void **vaddr)
  391. {
  392. int rc = 0;
  393. if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
  394. pr_debug("%s: probe is not done, deferred\n", __func__);
  395. return -EPROBE_DEFER;
  396. }
  397. if (!dma_buf || !paddr || !vaddr || !plen) {
  398. pr_err("%s: Invalid params\n", __func__);
  399. return -EINVAL;
  400. }
  401. /* bufsz should be 0 and fd shouldn't be 0 as of now */
  402. *dma_buf = dma_buf_get(fd);
  403. pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
  404. if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
  405. pr_err("%s: dma_buf_get failed\n", __func__);
  406. rc = -EINVAL;
  407. goto err;
  408. }
  409. if (ionflag != NULL) {
  410. rc = dma_buf_get_flags(*dma_buf, ionflag);
  411. if (rc) {
  412. pr_err("%s: could not get flags for the dma_buf\n",
  413. __func__);
  414. goto err_ion_flag;
  415. }
  416. }
  417. rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
  418. if (rc) {
  419. pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
  420. goto err_ion_flag;
  421. }
  422. pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
  423. *vaddr, bufsz);
  424. return 0;
  425. err_ion_flag:
  426. dma_buf_put(*dma_buf);
  427. err:
  428. *dma_buf = NULL;
  429. return rc;
  430. }
  431. EXPORT_SYMBOL(msm_audio_ion_import);
  432. /**
  433. * msm_audio_ion_free -
  434. * fress ION memory for given client and handle
  435. *
  436. * @dma_buf: dma_buf for the ION memory
  437. *
  438. * Returns 0 on success or error on failure
  439. */
  440. int msm_audio_ion_free(struct dma_buf *dma_buf)
  441. {
  442. int ret = 0;
  443. if (!dma_buf) {
  444. pr_err("%s: dma_buf invalid\n", __func__);
  445. return -EINVAL;
  446. }
  447. ret = msm_audio_ion_unmap_kernel(dma_buf);
  448. if (ret)
  449. return ret;
  450. msm_audio_dma_buf_unmap(dma_buf);
  451. return 0;
  452. }
  453. EXPORT_SYMBOL(msm_audio_ion_free);
  454. /**
  455. * msm_audio_ion_mmap -
  456. * Audio ION memory map
  457. *
  458. * @abuff: audio buf pointer
  459. * @vma: virtual mem area
  460. *
  461. * Returns 0 on success or error on failure
  462. */
  463. int msm_audio_ion_mmap(struct audio_buffer *abuff,
  464. struct vm_area_struct *vma)
  465. {
  466. struct msm_audio_alloc_data *alloc_data = NULL;
  467. struct sg_table *table;
  468. unsigned long addr = vma->vm_start;
  469. unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
  470. struct scatterlist *sg;
  471. unsigned int i;
  472. struct page *page;
  473. int ret = 0;
  474. bool found = false;
  475. struct device *cb_dev = msm_audio_ion_data.cb_dev;
  476. mutex_lock(&(msm_audio_ion_data.list_mutex));
  477. list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
  478. list) {
  479. if (alloc_data->dma_buf == abuff->dma_buf) {
  480. found = true;
  481. table = alloc_data->table;
  482. break;
  483. }
  484. }
  485. mutex_unlock(&(msm_audio_ion_data.list_mutex));
  486. if (!found) {
  487. dev_err(cb_dev,
  488. "%s: cannot find allocation, dma_buf %pK",
  489. __func__, abuff->dma_buf);
  490. return -EINVAL;
  491. }
  492. /* uncached */
  493. vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
  494. /* We need to check if a page is associated with this sg list because:
  495. * If the allocation came from a carveout we currently don't have
  496. * pages associated with carved out memory. This might change in the
  497. * future and we can remove this check and the else statement.
  498. */
  499. page = sg_page(table->sgl);
  500. if (page) {
  501. pr_debug("%s: page is NOT null\n", __func__);
  502. for_each_sg(table->sgl, sg, table->nents, i) {
  503. unsigned long remainder = vma->vm_end - addr;
  504. unsigned long len = sg->length;
  505. page = sg_page(sg);
  506. if (offset >= len) {
  507. offset -= len;
  508. continue;
  509. } else if (offset) {
  510. page += offset / PAGE_SIZE;
  511. len -= offset;
  512. offset = 0;
  513. }
  514. len = min(len, remainder);
  515. pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
  516. vma, (unsigned int)addr, len,
  517. (unsigned int)vma->vm_start,
  518. (unsigned int)vma->vm_end,
  519. (unsigned long)pgprot_val(vma->vm_page_prot));
  520. remap_pfn_range(vma, addr, page_to_pfn(page), len,
  521. vma->vm_page_prot);
  522. addr += len;
  523. if (addr >= vma->vm_end)
  524. return 0;
  525. }
  526. } else {
  527. pr_debug("%s: page is NULL\n", __func__);
  528. ret = -EINVAL;
  529. }
  530. return ret;
  531. }
  532. EXPORT_SYMBOL(msm_audio_ion_mmap);
  533. /**
  534. * msm_audio_ion_cache_operations-
  535. * Cache operations on cached Audio ION buffers
  536. *
  537. * @abuff: audio buf pointer
  538. * @cache_op: cache operation to be performed
  539. *
  540. * Returns 0 on success or error on failure
  541. */
  542. int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
  543. {
  544. unsigned long ionflag = 0;
  545. int rc = 0;
  546. if (!abuff) {
  547. pr_err("%s: Invalid params: %pK\n", __func__, abuff);
  548. return -EINVAL;
  549. }
  550. rc = dma_buf_get_flags(abuff->dma_buf, &ionflag);
  551. if (rc) {
  552. pr_err("%s: dma_buf_get_flags failed: %d\n", __func__, rc);
  553. goto cache_op_failed;
  554. }
  555. /* Has to be CACHED */
  556. if (ionflag & ION_FLAG_CACHED) {
  557. /* MSM_AUDIO_ION_INV_CACHES or MSM_AUDIO_ION_CLEAN_CACHES */
  558. switch (cache_op) {
  559. case MSM_AUDIO_ION_INV_CACHES:
  560. case MSM_AUDIO_ION_CLEAN_CACHES:
  561. dma_buf_begin_cpu_access(abuff->dma_buf,
  562. DMA_BIDIRECTIONAL);
  563. dma_buf_end_cpu_access(abuff->dma_buf,
  564. DMA_BIDIRECTIONAL);
  565. break;
  566. default:
  567. pr_err("%s: Invalid cache operation %d\n",
  568. __func__, cache_op);
  569. }
  570. } else {
  571. pr_err("%s: Cache ops called on uncached buffer: %pK\n",
  572. __func__, abuff->dma_buf);
  573. rc = -EINVAL;
  574. }
  575. cache_op_failed:
  576. return rc;
  577. }
  578. EXPORT_SYMBOL(msm_audio_ion_cache_operations);
  579. /**
  580. * msm_audio_populate_upper_32_bits -
  581. * retrieve upper 32bits of 64bit address
  582. *
  583. * @pa: 64bit physical address
  584. *
  585. */
  586. u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
  587. {
  588. if (sizeof(dma_addr_t) == sizeof(u32))
  589. return msm_audio_ion_get_smmu_sid_mode32();
  590. else
  591. return upper_32_bits(pa);
  592. }
  593. EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);
/* One-time SMMU bring-up: initialize the allocation-tracking list and
 * its lock.  Always succeeds.
 */
static int msm_audio_smmu_init(struct device *dev)
{
	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
	mutex_init(&(msm_audio_ion_data.list_mutex));
	return 0;
}
/* Device-tree match table: binds this driver to "qcom,msm-audio-ion" nodes */
static const struct of_device_id msm_audio_ion_dt_match[] = {
	{ .compatible = "qcom,msm-audio-ion" },
	{ }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
/*
 * Probe: read the SMMU configuration from the device tree and fill in
 * the driver-global msm_audio_ion_data state (cb_dev, SID bits,
 * PROBED flag).
 */
static int msm_audio_ion_probe(struct platform_device *pdev)
{
	int rc = 0;
	u64 smmu_sid = 0;
	u64 smmu_sid_mask = 0;
	const char *msm_audio_ion_dt = "qcom,smmu-enabled";
	const char *msm_audio_ion_smmu = "qcom,smmu-version";
	const char *msm_audio_ion_smmu_sid_mask = "qcom,smmu-sid-mask";
	bool smmu_enabled;
	enum apr_subsys_state q6_state;
	struct device *dev = &pdev->dev;
	struct of_phandle_args iommuspec;

	if (dev->of_node == NULL) {
		dev_err(dev,
			"%s: device tree is not found\n",
			__func__);
		msm_audio_ion_data.smmu_enabled = 0;
		/*
		 * NOTE(review): returning here skips the exit label, so
		 * MSM_AUDIO_ION_PROBED is never set and cb_dev stays NULL;
		 * alloc/import will then keep returning -EPROBE_DEFER.
		 * Confirm this is intended for no-DT platforms.
		 */
		return 0;
	}

	/* "qcom,smmu-enabled" selects the SMMU / system-heap path */
	smmu_enabled = of_property_read_bool(dev->of_node,
					     msm_audio_ion_dt);
	msm_audio_ion_data.smmu_enabled = smmu_enabled;

	if (!smmu_enabled) {
		dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
		goto exit;
	}

	/* Defer probing until the ADSP (Q6) subsystem is up */
	q6_state = apr_get_q6_state();
	if (q6_state == APR_SUBSYS_DOWN) {
		dev_dbg(dev,
			"defering %s, adsp_state %d\n",
			__func__, q6_state);
		return -EPROBE_DEFER;
	}
	dev_dbg(dev, "%s: adsp is ready\n", __func__);

	rc = of_property_read_u32(dev->of_node,
				  msm_audio_ion_smmu,
				  &msm_audio_ion_data.smmu_version);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu_version missing in DT node\n",
			__func__);
		return rc;
	}
	dev_dbg(dev, "%s: SMMU is Enabled. SMMU version is (%d)",
		__func__, msm_audio_ion_data.smmu_version);

	/* Get SMMU SID information from Devicetree */
	rc = of_property_read_u64(dev->of_node,
				  msm_audio_ion_smmu_sid_mask,
				  &smmu_sid_mask);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu-sid-mask missing in DT node, using default\n",
			__func__);
		/* Default: keep all SID bits from the iommus specifier */
		smmu_sid_mask = 0xFFFFFFFFFFFFFFFF;
	}

	rc = of_parse_phandle_with_args(dev->of_node, "iommus",
					"#iommu-cells", 0, &iommuspec);
	if (rc)
		dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
			__func__, rc);
	else
		smmu_sid = (iommuspec.args[0] & smmu_sid_mask);

	/* SID is carried in the upper 32 bits of every IOVA handed out */
	msm_audio_ion_data.smmu_sid_bits =
		smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;

	/* Only SMMU v2 is supported here */
	if (msm_audio_ion_data.smmu_version == 0x2) {
		rc = msm_audio_smmu_init(dev);
	} else {
		dev_err(dev, "%s: smmu version invalid %d\n",
			__func__, msm_audio_ion_data.smmu_version);
		rc = -EINVAL;
	}
	if (rc)
		dev_err(dev, "%s: smmu init failed, err = %d\n",
			__func__, rc);

exit:
	/* Mark the driver usable only when everything above succeeded */
	if (!rc)
		msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;

	msm_audio_ion_data.cb_dev = dev;

	return rc;
}
  685. static int msm_audio_ion_remove(struct platform_device *pdev)
  686. {
  687. struct device *audio_cb_dev;
  688. audio_cb_dev = msm_audio_ion_data.cb_dev;
  689. msm_audio_ion_data.smmu_enabled = 0;
  690. msm_audio_ion_data.device_status = 0;
  691. return 0;
  692. }
/* Platform-driver glue; matched against msm_audio_ion_dt_match above */
static struct platform_driver msm_audio_ion_driver = {
	.driver = {
		.name = "msm-audio-ion",
		/* NOTE(review): modern kernels set .owner in
		 * platform_driver_register(); explicit assignment is
		 * redundant but harmless.
		 */
		.owner = THIS_MODULE,
		.of_match_table = msm_audio_ion_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = msm_audio_ion_probe,
	.remove = msm_audio_ion_remove,
};
/* Module entry point: register the platform driver. */
int __init msm_audio_ion_init(void)
{
	return platform_driver_register(&msm_audio_ion_driver);
}
/* Module exit point: unregister the platform driver. */
void msm_audio_ion_exit(void)
{
	platform_driver_unregister(&msm_audio_ion_driver);
}
  711. MODULE_DESCRIPTION("MSM Audio ION module");
  712. MODULE_LICENSE("GPL v2");