// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/ion.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#ifndef CONFIG_GECKO_CORE
#include <ipc/apr.h>
#endif
#include <dsp/msm_audio_ion.h>
#include <linux/msm_audio.h>

#define MSM_AUDIO_ION_PROBED (1 << 0)

#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
	alloc_data->table->sgl->dma_address

#define MSM_AUDIO_SMMU_SID_OFFSET 32

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D

#define MSM_AUDIO_ION_DRIVER_NAME "msm_audio_ion"
#define MINOR_NUMBER_COUNT 1

struct msm_audio_ion_private {
	bool smmu_enabled;
	struct device *cb_dev;
	u8 device_status;
	struct list_head alloc_list;
	struct mutex list_mutex;
	u64 smmu_sid_bits;
	u32 smmu_version;
	bool is_non_hypervisor;
	/* list to store fd, phy. addr and handle data */
	struct list_head fd_list;
	/* char dev related data */
	dev_t ion_major;
	struct class *ion_class;
	struct device *chardev;
	struct cdev cdev;
};

struct msm_audio_alloc_data {
	size_t len;
	void *vaddr;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;
	struct list_head list;
};

static struct msm_audio_ion_private msm_audio_ion_data = {0,};

struct msm_audio_fd_data {
	int fd;
	void *handle;
	dma_addr_t paddr;
	struct list_head list;
};

static void msm_audio_ion_add_allocation(
	struct msm_audio_ion_private *msm_audio_ion_data,
	struct msm_audio_alloc_data *alloc_data)
{
	/*
	 * Since these APIs can be invoked by multiple
	 * clients, there is need to make sure the list
	 * of allocations is always protected
	 */
	mutex_lock(&(msm_audio_ion_data->list_mutex));
	list_add_tail(&(alloc_data->list),
		      &(msm_audio_ion_data->alloc_list));
	mutex_unlock(&(msm_audio_ion_data->list_mutex));
}

static void *msm_audio_ion_map_kernel(struct dma_buf *dma_buf)
{
	int rc = 0;
	void *addr = NULL;
	struct msm_audio_alloc_data *alloc_data = NULL;

	rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
	if (rc) {
		pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
		goto exit;
	}

	addr = dma_buf_vmap(dma_buf);
	if (!addr) {
		pr_err("%s: kernel mapping of dma_buf failed\n",
		       __func__);
		goto exit;
	}

	/*
	 * TBD: remove the below section once new API
	 * for mapping kernel virtual address is available.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			alloc_data->vaddr = addr;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

exit:
	return addr;
}

static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
				 dma_addr_t *addr, size_t *len, bool is_iova)
{
	struct msm_audio_alloc_data *alloc_data;
	struct device *cb_dev;
	unsigned long ionflag = 0;
	int rc = 0;
	void *vaddr = NULL;

	cb_dev = msm_audio_ion_data.cb_dev;

	/* Data required per buffer mapping */
	alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	alloc_data->dma_buf = dma_buf;
	alloc_data->len = dma_buf->size;
	*len = dma_buf->size;

	/* Attach the dma_buf to the context bank device */
	alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
					    cb_dev);
	if (IS_ERR(alloc_data->attach)) {
		rc = PTR_ERR(alloc_data->attach);
		dev_err(cb_dev,
			"%s: Fail to attach dma_buf to CB, rc = %d\n",
			__func__, rc);
		goto free_alloc_data;
	}

	/* For uncached buffers, avoid cache maintenance */
	rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
	if (rc) {
		dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}
	if (!(ionflag & ION_FLAG_CACHED))
		alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	/*
	 * Get the scatter-gather list.
	 * There is no info on whether this is a write buffer or a
	 * read buffer, hence the request is bi-directional
	 * to accommodate both read and write mappings.
	 */
	alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
						   DMA_BIDIRECTIONAL);
	if (IS_ERR(alloc_data->table)) {
		rc = PTR_ERR(alloc_data->table);
		dev_err(cb_dev,
			"%s: Fail to map attachment, rc = %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}

	/* physical address from mapping */
	if (!is_iova) {
		*addr = sg_phys(alloc_data->table->sgl);
		vaddr = msm_audio_ion_map_kernel((void *)dma_buf);
		if (IS_ERR_OR_NULL(vaddr)) {
			pr_err("%s: ION memory mapping for AUDIO failed\n",
			       __func__);
			rc = -ENOMEM;
			goto detach_dma_buf;
		}
		alloc_data->vaddr = vaddr;
	} else {
		*addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);
	}

	msm_audio_ion_add_allocation(&msm_audio_ion_data,
				     alloc_data);
	return rc;

detach_dma_buf:
	dma_buf_detach(alloc_data->dma_buf,
		       alloc_data->attach);
free_alloc_data:
	kfree(alloc_data);
	return rc;
}

static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf)
{
	int rc = 0;
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct list_head *ptr, *next;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;
	bool found = false;

	/*
	 * Though list_for_each_safe is delete safe, the lock
	 * should be explicitly acquired to avoid a race condition
	 * with elements being added to the list.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_safe(ptr, next,
			   &(msm_audio_ion_data.alloc_list)) {
		alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
					list);
		if (alloc_data->dma_buf == dma_buf) {
			found = true;
			dma_buf_unmap_attachment(alloc_data->attach,
						 alloc_data->table,
						 DMA_BIDIRECTIONAL);
			dma_buf_detach(alloc_data->dma_buf,
				       alloc_data->attach);
			dma_buf_put(alloc_data->dma_buf);
			list_del(&(alloc_data->list));
			kfree(alloc_data);
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		dev_err(cb_dev,
			"%s: cannot find allocation, dma_buf %pK",
			__func__, dma_buf);
		rc = -EINVAL;
	}

	return rc;
}

static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
				  dma_addr_t *addr, size_t *len, bool is_iova)
{
	int rc = 0;

	rc = msm_audio_dma_buf_map(dma_buf, addr, len, is_iova);
	if (rc) {
		pr_err("%s: failed to map DMA buf, err = %d\n",
		       __func__, rc);
		goto err;
	}
	if (msm_audio_ion_data.smmu_enabled && is_iova) {
		/* Append the SMMU SID information to the IOVA address */
		*addr |= msm_audio_ion_data.smmu_sid_bits;
	}

	pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
err:
	return rc;
}
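
/*
 * Illustration (not driver code): when the SMMU is enabled and an IOVA is
 * requested, the address returned by msm_audio_ion_get_phys() carries the
 * SMMU SID in its upper 32 bits and the IOVA in its lower 32 bits. For
 * example, with SID 0x1 and IOVA 0xC0000000:
 *
 *	smmu_sid_bits = 0x1ULL << MSM_AUDIO_SMMU_SID_OFFSET; // 0x0000000100000000
 *	*addr         = smmu_sid_bits | 0xC0000000;          // 0x00000001C0000000
 */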

int msm_audio_ion_get_smmu_info(struct device **cb_dev,
				u64 *smmu_sid)
{
	if (!cb_dev || !smmu_sid) {
		pr_err("%s: Invalid params\n",
		       __func__);
		return -EINVAL;
	}

	if (!msm_audio_ion_data.cb_dev ||
	    !msm_audio_ion_data.smmu_sid_bits) {
		pr_err("%s: Params not initialized\n",
		       __func__);
		return -EINVAL;
	}

	*cb_dev = msm_audio_ion_data.cb_dev;
	*smmu_sid = msm_audio_ion_data.smmu_sid_bits;

	return 0;
}

static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
{
	int rc = 0;
	void *vaddr = NULL;
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	/*
	 * TBD: remove the below section once new API
	 * for unmapping kernel virtual address is available.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			vaddr = alloc_data->vaddr;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!vaddr) {
		dev_err(cb_dev,
			"%s: cannot find allocation for dma_buf %pK",
			__func__, dma_buf);
		rc = -EINVAL;
		goto err;
	}

	dma_buf_vunmap(dma_buf, vaddr);

	rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
			__func__);
		goto err;
	}

err:
	return rc;
}

static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
				 size_t *plen, void **vaddr)
{
	int rc = 0;
	bool is_iova = true;

	rc = msm_audio_ion_get_phys(dma_buf, paddr, plen, is_iova);
	if (rc) {
		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
		       __func__, rc);
		dma_buf_put(dma_buf);
		goto err;
	}

	*vaddr = msm_audio_ion_map_kernel(dma_buf);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
		rc = -ENOMEM;
		msm_audio_dma_buf_unmap(dma_buf);
		goto err;
	}

err:
	return rc;
}

static u32 msm_audio_ion_get_smmu_sid_mode32(void)
{
	if (msm_audio_ion_data.smmu_enabled)
		return upper_32_bits(msm_audio_ion_data.smmu_sid_bits);
	else
		return 0;
}

/**
 * msm_audio_ion_alloc -
 *	Allocates ION memory for the given client name
 *
 * @dma_buf: dma_buf for the ION memory
 * @bufsz: buffer size
 * @paddr: physical address to be assigned with the allocated region
 * @plen: length of the allocated region to be assigned
 * @vaddr: virtual address to be assigned
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
			dma_addr_t *paddr, size_t *plen, void **vaddr)
{
	int rc = -EINVAL;
	unsigned long err_ion_ptr = 0;

	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
		pr_debug("%s: probe is not done, deferred\n", __func__);
		return -EPROBE_DEFER;
	}
	if (!dma_buf || !paddr || !vaddr || !bufsz || !plen) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (msm_audio_ion_data.smmu_enabled == true) {
		pr_debug("%s: system heap is used\n", __func__);
		*dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
	} else {
		pr_debug("%s: audio heap is used\n", __func__);
		*dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	}
	if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
		if (IS_ERR((void *)(*dma_buf)))
			err_ion_ptr = PTR_ERR((int *)(*dma_buf));
		pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
		       __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
		rc = -ENOMEM;
		goto err;
	}

	rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
	if (rc) {
		pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
		goto err;
	}
	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
		 *vaddr, bufsz);

	memset(*vaddr, 0, bufsz);
	/* TODO: should the dma_buf be released here on mapping error? */

err:
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_alloc);
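
/*
 * Hypothetical usage sketch (illustration only, not part of this driver):
 * a kernel client could allocate a DMA buffer, hand paddr/plen to the DSP,
 * touch it through vaddr, and release it with msm_audio_ion_free(). The
 * 4096-byte size is an arbitrary example value.
 *
 *	struct dma_buf *dbuf = NULL;
 *	dma_addr_t paddr = 0;
 *	size_t plen = 0;
 *	void *vaddr = NULL;
 *
 *	int rc = msm_audio_ion_alloc(&dbuf, 4096, &paddr, &plen, &vaddr);
 *	if (!rc) {
 *		memset(vaddr, 0, plen);       // CPU access via kernel mapping
 *		// ... program paddr/plen into the DSP memory-map command ...
 *		msm_audio_ion_free(dbuf);     // unmap and drop the dma_buf
 *	}
 */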

int msm_audio_ion_phys_free(void *handle,
			    dma_addr_t *paddr,
			    size_t *pa_len,
			    u8 assign_type,
			    int id,
			    int key)
{
	handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_free);

int msm_audio_ion_phys_assign(void **handle, int fd,
			      dma_addr_t *paddr, size_t *pa_len,
			      u8 assign_type, int id)
{
	*handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_assign);

bool msm_audio_is_hypervisor_supported(void)
{
	return !(msm_audio_ion_data.is_non_hypervisor);
}
EXPORT_SYMBOL(msm_audio_is_hypervisor_supported);

/**
 * msm_audio_ion_dma_map -
 *	Memory maps a given DMA buffer
 *
 * @phys_addr: physical address of the DMA buffer to be mapped
 * @iova_base: IOVA address of the memory-mapped DMA buffer
 * @size: buffer size
 * @dir: DMA direction
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_dma_map(dma_addr_t *phys_addr, dma_addr_t *iova_base,
			  u32 size, enum dma_data_direction dir)
{
	dma_addr_t iova;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	if (!phys_addr || !iova_base || !size)
		return -EINVAL;

	iova = dma_map_resource(cb_dev, *phys_addr, size,
				dir, 0);
	if (dma_mapping_error(cb_dev, iova)) {
		pr_err("%s: dma_mapping_error\n", __func__);
		return -EIO;
	}
	pr_debug("%s: dma_mapping_success iova:0x%lx\n", __func__,
		 (unsigned long)iova);
	if (msm_audio_ion_data.smmu_enabled)
		/* Append the SMMU SID information to the IOVA address */
		iova |= msm_audio_ion_data.smmu_sid_bits;

	*iova_base = iova;

	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_dma_map);
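
/*
 * Hypothetical usage sketch (illustration only): mapping an already known
 * physical region into the audio SMMU context and retrieving the IOVA to
 * program into the DSP. The phys and size values below are made up for
 * the example.
 *
 *	dma_addr_t phys = 0x0C400000;   // example physical address
 *	dma_addr_t iova = 0;
 *
 *	if (!msm_audio_ion_dma_map(&phys, &iova, 0x1000, DMA_BIDIRECTIONAL))
 *		pr_debug("mapped phys %pad at iova %pad\n", &phys, &iova);
 */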

void msm_audio_fd_list_debug(void)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		pr_debug("%s fd %d handle %pK phy. addr %pK\n", __func__,
			 msm_audio_fd_data->fd, msm_audio_fd_data->handle,
			 (void *)msm_audio_fd_data->paddr);
	}
}

void msm_audio_update_fd_list(struct msm_audio_fd_data *msm_audio_fd_data)
{
	struct msm_audio_fd_data *msm_audio_fd_data1 = NULL;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data1,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data1->fd == msm_audio_fd_data->fd) {
			pr_err("%s fd already present, not updating the list",
			       __func__);
			mutex_unlock(&(msm_audio_ion_data.list_mutex));
			return;
		}
	}
	list_add_tail(&msm_audio_fd_data->list, &msm_audio_ion_data.fd_list);
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

void msm_audio_delete_fd_entry(void *handle)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	struct list_head *ptr, *next;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_safe(ptr, next,
			   &msm_audio_ion_data.fd_list) {
		msm_audio_fd_data = list_entry(ptr, struct msm_audio_fd_data,
					       list);
		if (msm_audio_fd_data->handle == handle) {
			pr_debug("%s deleting handle %pK entry from list\n",
				 __func__, handle);
			list_del(&(msm_audio_fd_data->list));
			kfree(msm_audio_fd_data);
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

int msm_audio_get_phy_addr(int fd, dma_addr_t *paddr)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	int status = -EINVAL;

	if (!paddr) {
		pr_err("%s Invalid paddr param status %d\n", __func__, status);
		return status;
	}
	pr_debug("%s, fd %d\n", __func__, fd);
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data->fd == fd) {
			*paddr = msm_audio_fd_data->paddr;
			status = 0;
			pr_debug("%s Found fd %d paddr %pK\n",
				 __func__, fd, paddr);
			mutex_unlock(&(msm_audio_ion_data.list_mutex));
			return status;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return status;
}
EXPORT_SYMBOL(msm_audio_get_phy_addr);

void msm_audio_get_handle(int fd, void **handle)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	pr_debug("%s fd %d\n", __func__, fd);
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data->fd == fd) {
			*handle = (struct dma_buf *)msm_audio_fd_data->handle;
			pr_debug("%s handle %pK\n", __func__, *handle);
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

/**
 * msm_audio_ion_import -
 *	Imports an ION buffer with the given file descriptor
 *
 * @dma_buf: dma_buf for the ION memory
 * @fd: file descriptor for the ION memory
 * @ionflag: flags associated with the ION buffer
 * @bufsz: buffer size
 * @paddr: physical address to be assigned with the allocated region
 * @plen: length of the allocated region to be assigned
 * @vaddr: virtual address to be assigned
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
			 unsigned long *ionflag, size_t bufsz,
			 dma_addr_t *paddr, size_t *plen, void **vaddr)
{
	int rc = 0;

	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
		pr_debug("%s: probe is not done, deferred\n", __func__);
		return -EPROBE_DEFER;
	}

	if (!dma_buf || !paddr || !vaddr || !plen) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* bufsz should be 0 and fd shouldn't be 0 as of now */
	*dma_buf = dma_buf_get(fd);
	pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
	if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
		pr_err("%s: dma_buf_get failed\n", __func__);
		rc = -EINVAL;
		goto err;
	}

	if (ionflag != NULL) {
		rc = dma_buf_get_flags(*dma_buf, ionflag);
		if (rc) {
			pr_err("%s: could not get flags for the dma_buf\n",
			       __func__);
			goto err_ion_flag;
		}
	}

	rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
	if (rc) {
		pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
		goto err;
	}
	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
		 *vaddr, bufsz);

	return 0;

err_ion_flag:
	dma_buf_put(*dma_buf);
err:
	*dma_buf = NULL;
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_import);
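
/*
 * Hypothetical usage sketch (illustration only): importing a dma-buf fd
 * received from userspace (for example through an ioctl argument) and
 * obtaining its IOVA, length and kernel mapping. shm_fd is a placeholder
 * for the fd passed in by the client.
 *
 *	struct dma_buf *dbuf = NULL;
 *	unsigned long flags = 0;
 *	dma_addr_t paddr = 0;
 *	size_t plen = 0;
 *	void *vaddr = NULL;
 *
 *	int rc = msm_audio_ion_import(&dbuf, shm_fd, &flags, 0,
 *				      &paddr, &plen, &vaddr);
 *	if (!rc) {
 *		// ... share paddr/plen with the DSP ...
 *		msm_audio_ion_free(dbuf);
 *	}
 */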

/**
 * msm_audio_ion_free -
 *	Frees ION memory for the given client and handle
 *
 * @dma_buf: dma_buf for the ION memory
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_free(struct dma_buf *dma_buf)
{
	int ret = 0;

	if (!dma_buf) {
		pr_err("%s: dma_buf invalid\n", __func__);
		return -EINVAL;
	}

	ret = msm_audio_ion_unmap_kernel(dma_buf);
	if (ret)
		return ret;

	msm_audio_dma_buf_unmap(dma_buf);

	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_free);

/**
 * msm_audio_ion_crash_handler -
 *	Handles cleanup after a userspace crash.
 *
 * To be called from the machine driver.
 */
void msm_audio_ion_crash_handler(void)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	struct list_head *ptr, *next;
	void *handle = NULL;

	pr_debug("Inside %s\n", __func__);
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		handle = msm_audio_fd_data->handle;
		msm_audio_ion_free(handle);
	}
	list_for_each_safe(ptr, next,
			   &msm_audio_ion_data.fd_list) {
		msm_audio_fd_data = list_entry(ptr, struct msm_audio_fd_data,
					       list);
		list_del(&(msm_audio_fd_data->list));
		kfree(msm_audio_fd_data);
	}
}
EXPORT_SYMBOL(msm_audio_ion_crash_handler);

/**
 * msm_audio_ion_mmap -
 *	Audio ION memory map
 *
 * @abuff: audio buf pointer
 * @vma: virtual mem area
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_mmap(struct audio_buffer *abuff,
		       struct vm_area_struct *vma)
{
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct sg_table *table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	unsigned int i;
	struct page *page;
	int ret = 0;
	bool found = false;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == abuff->dma_buf) {
			found = true;
			table = alloc_data->table;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		dev_err(cb_dev,
			"%s: cannot find allocation, dma_buf %pK",
			__func__, abuff->dma_buf);
		return -EINVAL;
	}
	/* uncached */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/*
	 * We need to check if a page is associated with this sg list because:
	 * If the allocation came from a carveout we currently don't have
	 * pages associated with carved out memory. This might change in the
	 * future and we can remove this check and the else statement.
	 */
	page = sg_page(table->sgl);
	if (page) {
		pr_debug("%s: page is NOT null\n", __func__);
		for_each_sg(table->sgl, sg, table->nents, i) {
			unsigned long remainder = vma->vm_end - addr;
			unsigned long len = sg->length;

			page = sg_page(sg);

			if (offset >= len) {
				offset -= len;
				continue;
			} else if (offset) {
				page += offset / PAGE_SIZE;
				len -= offset;
				offset = 0;
			}
			len = min(len, remainder);
			pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
				 vma, (unsigned int)addr, len,
				 (unsigned int)vma->vm_start,
				 (unsigned int)vma->vm_end,
				 (unsigned long)pgprot_val(vma->vm_page_prot));
			remap_pfn_range(vma, addr, page_to_pfn(page), len,
					vma->vm_page_prot);
			addr += len;
			if (addr >= vma->vm_end)
				return 0;
		}
	} else {
		pr_debug("%s: page is NULL\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(msm_audio_ion_mmap);

/**
 * msm_audio_ion_cache_operations -
 *	Cache operations on cached Audio ION buffers
 *
 * @abuff: audio buf pointer
 * @cache_op: cache operation to be performed
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
{
	unsigned long ionflag = 0;
	int rc = 0;

	if (!abuff) {
		pr_err("%s: Invalid params: %pK\n", __func__, abuff);
		return -EINVAL;
	}
	rc = dma_buf_get_flags(abuff->dma_buf, &ionflag);
	if (rc) {
		pr_err("%s: dma_buf_get_flags failed: %d\n", __func__, rc);
		goto cache_op_failed;
	}

	/* Has to be CACHED */
	if (ionflag & ION_FLAG_CACHED) {
		/* MSM_AUDIO_ION_INV_CACHES or MSM_AUDIO_ION_CLEAN_CACHES */
		switch (cache_op) {
		case MSM_AUDIO_ION_INV_CACHES:
		case MSM_AUDIO_ION_CLEAN_CACHES:
			dma_buf_begin_cpu_access(abuff->dma_buf,
						 DMA_BIDIRECTIONAL);
			dma_buf_end_cpu_access(abuff->dma_buf,
					       DMA_BIDIRECTIONAL);
			break;
		default:
			pr_err("%s: Invalid cache operation %d\n",
			       __func__, cache_op);
		}
	} else {
		pr_err("%s: Cache ops called on uncached buffer: %pK\n",
		       __func__, abuff->dma_buf);
		rc = -EINVAL;
	}

cache_op_failed:
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_cache_operations);
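
/*
 * Hypothetical usage sketch (illustration only): flushing a cached audio
 * buffer before handing it to the DSP. abuff is assumed to be an
 * audio_buffer whose dma_buf was allocated or imported with
 * ION_FLAG_CACHED set.
 *
 *	rc = msm_audio_ion_cache_operations(abuff, MSM_AUDIO_ION_CLEAN_CACHES);
 *	if (rc)
 *		pr_err("cache clean failed: %d\n", rc);
 */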

/**
 * msm_audio_populate_upper_32_bits -
 *	Retrieves the upper 32 bits of a 64-bit address
 *
 * @pa: 64-bit physical address
 */
u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
{
	if (sizeof(dma_addr_t) == sizeof(u32))
		return msm_audio_ion_get_smmu_sid_mode32();
	else
		return upper_32_bits(pa);
}
EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);
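
/*
 * Illustration (not driver code): a mapped address is often split into two
 * 32-bit words for DSP commands; the MSW/LSW pair could be built from the
 * mapped address like this.
 *
 *	u32 lsw = lower_32_bits(paddr);
 *	u32 msw = msm_audio_populate_upper_32_bits(paddr);
 */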

static int msm_audio_ion_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct msm_audio_ion_private *ion_data = container_of(inode->i_cdev,
						struct msm_audio_ion_private,
						cdev);
	struct device *dev = ion_data->chardev;

	pr_debug("Inside %s\n", __func__);
	get_device(dev);
	return ret;
}

static int msm_audio_ion_release(struct inode *inode, struct file *file)
{
	struct msm_audio_ion_private *ion_data = container_of(inode->i_cdev,
						struct msm_audio_ion_private,
						cdev);
	struct device *dev = ion_data->chardev;

	pr_debug("Inside %s\n", __func__);
	put_device(dev);
	return 0;
}

static long msm_audio_ion_ioctl(struct file *file, unsigned int ioctl_num,
				unsigned long __user ioctl_param)
{
	void *mem_handle;
	dma_addr_t paddr;
	size_t pa_len = 0;
	void *vaddr;
	int ret = 0;
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	pr_debug("%s ioctl num %u\n", __func__, ioctl_num);
	switch (ioctl_num) {
	case IOCTL_MAP_PHYS_ADDR:
		msm_audio_fd_data = kzalloc((sizeof(struct msm_audio_fd_data)),
					    GFP_KERNEL);
		if (!msm_audio_fd_data) {
			ret = -ENOMEM;
			pr_err("%s : Out of memory ret %d\n", __func__, ret);
			return ret;
		}
		ret = msm_audio_ion_import((struct dma_buf **)&mem_handle,
					   (int)ioctl_param,
					   NULL, 0, &paddr, &pa_len, &vaddr);
		if (ret < 0) {
			pr_err("%s Memory map Failed %d\n", __func__, ret);
			kfree(msm_audio_fd_data);
			return ret;
		}
		msm_audio_fd_data->fd = (int)ioctl_param;
		msm_audio_fd_data->handle = mem_handle;
		msm_audio_fd_data->paddr = paddr;
		msm_audio_update_fd_list(msm_audio_fd_data);
		break;
	case IOCTL_UNMAP_PHYS_ADDR:
		msm_audio_get_handle((int)ioctl_param, &mem_handle);
		ret = msm_audio_ion_free(mem_handle);
		if (ret < 0) {
			pr_err("%s Ion free failed %d\n", __func__, ret);
			return ret;
		}
		msm_audio_delete_fd_entry(mem_handle);
		break;
	default:
		pr_err("%s Entered default. Invalid ioctl num %u",
		       __func__, ioctl_num);
		ret = -EINVAL;
		break;
	}
	return ret;
}
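
/*
 * Hypothetical userspace usage sketch (illustration only): a client maps a
 * dma-buf fd through the char device exposed by this driver and unmaps it
 * when done. The device node name is assumed to follow
 * MSM_AUDIO_ION_DRIVER_NAME ("/dev/msm_audio_ion"); buf_fd is a placeholder
 * for the client's dma-buf fd.
 *
 *	int dev_fd = open("/dev/msm_audio_ion", O_RDWR);
 *	if (dev_fd >= 0) {
 *		ioctl(dev_fd, IOCTL_MAP_PHYS_ADDR, buf_fd);
 *		// ... use the buffer ...
 *		ioctl(dev_fd, IOCTL_UNMAP_PHYS_ADDR, buf_fd);
 *		close(dev_fd);
 *	}
 */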

static int msm_audio_smmu_init(struct device *dev)
{
	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
	mutex_init(&(msm_audio_ion_data.list_mutex));
	return 0;
}

static const struct of_device_id msm_audio_ion_dt_match[] = {
	{ .compatible = "qcom,msm-audio-ion" },
	{ }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);

static const struct file_operations msm_audio_ion_fops = {
	.owner = THIS_MODULE,
	.open = msm_audio_ion_open,
	.release = msm_audio_ion_release,
	.unlocked_ioctl = msm_audio_ion_ioctl,
};

static int msm_audio_ion_reg_chrdev(struct msm_audio_ion_private *ion_data)
{
	int ret = 0;

	ret = alloc_chrdev_region(&ion_data->ion_major, 0,
				  MINOR_NUMBER_COUNT, MSM_AUDIO_ION_DRIVER_NAME);
	if (ret < 0) {
		pr_err("%s alloc_chrdev_region failed ret : %d\n",
		       __func__, ret);
		return ret;
	}
	pr_debug("%s major number %d", __func__, MAJOR(ion_data->ion_major));

	ion_data->ion_class = class_create(THIS_MODULE,
					   MSM_AUDIO_ION_DRIVER_NAME);
	if (IS_ERR(ion_data->ion_class)) {
		ret = PTR_ERR(ion_data->ion_class);
		pr_err("%s class create failed. ret : %d", __func__, ret);
		goto err_class;
	}

	ion_data->chardev = device_create(ion_data->ion_class, NULL,
					  ion_data->ion_major, NULL,
					  MSM_AUDIO_ION_DRIVER_NAME);
	if (IS_ERR(ion_data->chardev)) {
		ret = PTR_ERR(ion_data->chardev);
		pr_err("%s device create failed ret : %d\n", __func__, ret);
		goto err_device;
	}

	cdev_init(&ion_data->cdev, &msm_audio_ion_fops);
	ret = cdev_add(&ion_data->cdev, ion_data->ion_major, 1);
	if (ret) {
		pr_err("%s cdev add failed, ret : %d\n", __func__, ret);
		goto err_cdev;
	}
	return ret;

err_cdev:
	device_destroy(ion_data->ion_class, ion_data->ion_major);
err_device:
	class_destroy(ion_data->ion_class);
err_class:
	unregister_chrdev_region(0, MINOR_NUMBER_COUNT);
	return ret;
}

static int msm_audio_ion_unreg_chrdev(struct msm_audio_ion_private *ion_data)
{
	cdev_del(&ion_data->cdev);
	device_destroy(ion_data->ion_class, ion_data->ion_major);
	class_destroy(ion_data->ion_class);
	unregister_chrdev_region(0, MINOR_NUMBER_COUNT);
	return 0;
}

static int msm_audio_ion_probe(struct platform_device *pdev)
{
	int rc = 0;
	u64 smmu_sid = 0;
	u64 smmu_sid_mask = 0;
	const char *msm_audio_ion_dt = "qcom,smmu-enabled";
	const char *msm_audio_ion_non_hyp = "qcom,non-hyp-assign";
	const char *msm_audio_ion_smmu = "qcom,smmu-version";
	const char *msm_audio_ion_smmu_sid_mask = "qcom,smmu-sid-mask";
	bool smmu_enabled;
	bool is_non_hypervisor_en;
	struct device *dev = &pdev->dev;
	struct of_phandle_args iommuspec;
#ifndef CONFIG_GECKO_CORE
	enum apr_subsys_state q6_state;
#endif

	dev_err(dev, "%s: msm_audio_ion_probe\n", __func__);
	if (dev->of_node == NULL) {
		dev_err(dev,
			"%s: device tree is not found\n",
			__func__);
		msm_audio_ion_data.smmu_enabled = 0;
		return 0;
	}

	is_non_hypervisor_en = of_property_read_bool(dev->of_node,
						     msm_audio_ion_non_hyp);
	msm_audio_ion_data.is_non_hypervisor = is_non_hypervisor_en;

	smmu_enabled = of_property_read_bool(dev->of_node,
					     msm_audio_ion_dt);
	msm_audio_ion_data.smmu_enabled = smmu_enabled;

	if (!smmu_enabled) {
		dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
		goto exit;
	}

#ifndef CONFIG_GECKO_CORE
	q6_state = apr_get_q6_state();
	if (q6_state == APR_SUBSYS_DOWN) {
		dev_info(dev,
			 "deferring %s, adsp_state %d\n",
			 __func__, q6_state);
		return -EPROBE_DEFER;
	}
#endif
	dev_dbg(dev, "%s: adsp is ready\n", __func__);

	rc = of_property_read_u32(dev->of_node,
				  msm_audio_ion_smmu,
				  &msm_audio_ion_data.smmu_version);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu-version missing in DT node\n",
			__func__);
		return rc;
	}
	dev_dbg(dev, "%s: SMMU is Enabled. SMMU version is (%d)",
		__func__, msm_audio_ion_data.smmu_version);

	/* Get SMMU SID information from the device tree */
	rc = of_property_read_u64(dev->of_node,
				  msm_audio_ion_smmu_sid_mask,
				  &smmu_sid_mask);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu-sid-mask missing in DT node, using default\n",
			__func__);
		smmu_sid_mask = 0xFFFFFFFFFFFFFFFF;
	}

	rc = of_parse_phandle_with_args(dev->of_node, "iommus",
					"#iommu-cells", 0, &iommuspec);
	if (rc)
		dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
			__func__, rc);
	else
		smmu_sid = (iommuspec.args[0] & smmu_sid_mask);

	msm_audio_ion_data.smmu_sid_bits =
		smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;

	if (msm_audio_ion_data.smmu_version == 0x2) {
		rc = msm_audio_smmu_init(dev);
	} else {
		dev_err(dev, "%s: smmu version invalid %d\n",
			__func__, msm_audio_ion_data.smmu_version);
		rc = -EINVAL;
	}
	if (rc)
		dev_err(dev, "%s: smmu init failed, err = %d\n",
			__func__, rc);

exit:
	if (!rc)
		msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;

	msm_audio_ion_data.cb_dev = dev;
	INIT_LIST_HEAD(&msm_audio_ion_data.fd_list);
	rc = msm_audio_ion_reg_chrdev(&msm_audio_ion_data);
	if (rc) {
		pr_err("%s register char dev failed, rc : %d", __func__, rc);
		return rc;
	}
	return rc;
}

static int msm_audio_ion_remove(struct platform_device *pdev)
{
	struct device *audio_cb_dev;

	audio_cb_dev = msm_audio_ion_data.cb_dev;

	msm_audio_ion_data.smmu_enabled = 0;
	msm_audio_ion_data.device_status = 0;
	msm_audio_ion_unreg_chrdev(&msm_audio_ion_data);
	return 0;
}

static struct platform_driver msm_audio_ion_driver = {
	.driver = {
		.name = "msm-audio-ion",
		.owner = THIS_MODULE,
		.of_match_table = msm_audio_ion_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = msm_audio_ion_probe,
	.remove = msm_audio_ion_remove,
};

int __init msm_audio_ion_init(void)
{
	pr_debug("%s: msm_audio_ion_init called\n", __func__);
	return platform_driver_register(&msm_audio_ion_driver);
}

void msm_audio_ion_exit(void)
{
	platform_driver_unregister(&msm_audio_ion_driver);
}

module_init(msm_audio_ion_init);
module_exit(msm_audio_ion_exit);
MODULE_DESCRIPTION("MSM Audio ION module");
MODULE_LICENSE("GPL v2");