msm_audio_ion.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/export.h>
#include <linux/ion.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#ifndef CONFIG_SPF_CORE
#include <ipc/apr.h>
#endif
#include <dsp/msm_audio_ion.h>
#include <linux/msm_audio.h>

#define MSM_AUDIO_ION_PROBED (1 << 0)

#define MSM_AUDIO_ION_PHYS_ADDR(alloc_data) \
	alloc_data->table->sgl->dma_address

#define MSM_AUDIO_SMMU_SID_OFFSET 32

#define TZ_PIL_PROTECT_MEM_SUBSYS_ID 0x0C
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D

#define MSM_AUDIO_ION_DRIVER_NAME "msm_audio_ion"
#define MINOR_NUMBER_COUNT 1

struct msm_audio_ion_private {
	bool smmu_enabled;
	struct device *cb_dev;
	struct device *cb_cma_dev;
	u8 device_status;
	struct list_head alloc_list;
	struct mutex list_mutex;
	u64 smmu_sid_bits;
	u32 smmu_version;
	bool is_non_hypervisor;
	/* list to store fd, phy. addr and handle data */
	struct list_head fd_list;
	/* char dev related data */
	dev_t ion_major;
	struct class *ion_class;
	struct device *chardev;
	struct cdev cdev;
};

struct msm_audio_alloc_data {
	size_t len;
	void *vaddr;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;
	struct list_head list;
};

static struct msm_audio_ion_private msm_audio_ion_data = {0,};

struct msm_audio_fd_data {
	int fd;
	void *handle;
	dma_addr_t paddr;
	struct list_head list;
};

static void msm_audio_ion_add_allocation(
	struct msm_audio_ion_private *msm_audio_ion_data,
	struct msm_audio_alloc_data *alloc_data)
{
	/*
	 * Since these APIs can be invoked by multiple
	 * clients, there is a need to make sure the list
	 * of allocations is always protected.
	 */
	mutex_lock(&(msm_audio_ion_data->list_mutex));
	list_add_tail(&(alloc_data->list),
		      &(msm_audio_ion_data->alloc_list));
	mutex_unlock(&(msm_audio_ion_data->list_mutex));
}

static void *msm_audio_ion_map_kernel(struct dma_buf *dma_buf)
{
	int rc = 0;
	void *addr = NULL;
	struct msm_audio_alloc_data *alloc_data = NULL;

	rc = dma_buf_begin_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
	if (rc) {
		pr_err("%s: kmap dma_buf_begin_cpu_access fail\n", __func__);
		goto exit;
	}

	addr = dma_buf_vmap(dma_buf);
	if (!addr) {
		pr_err("%s: kernel mapping of dma_buf failed\n",
		       __func__);
		goto exit;
	}

	/*
	 * TBD: remove the below section once new API
	 * for mapping kernel virtual address is available.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			alloc_data->vaddr = addr;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

exit:
	return addr;
}

static int msm_audio_dma_buf_map(struct dma_buf *dma_buf,
				 dma_addr_t *addr, size_t *len, bool is_iova,
				 bool cma_mem)
{
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct device *cb_dev;
	unsigned long ionflag = 0;
	int rc = 0;
	void *vaddr = NULL;

	if (cma_mem)
		cb_dev = msm_audio_ion_data.cb_cma_dev;
	else
		cb_dev = msm_audio_ion_data.cb_dev;

	/* Data required per buffer mapping */
	alloc_data = kzalloc(sizeof(*alloc_data), GFP_KERNEL);
	if (!alloc_data)
		return -ENOMEM;

	alloc_data->dma_buf = dma_buf;
	alloc_data->len = dma_buf->size;
	*len = dma_buf->size;

	/* Attach the dma_buf to the context bank device */
	alloc_data->attach = dma_buf_attach(alloc_data->dma_buf,
					    cb_dev);
	if (IS_ERR(alloc_data->attach)) {
		rc = PTR_ERR(alloc_data->attach);
		dev_err(cb_dev,
			"%s: Fail to attach dma_buf to CB, rc = %d\n",
			__func__, rc);
		goto free_alloc_data;
	}

	/* For uncached buffers, avoid cache maintenance */
	rc = dma_buf_get_flags(alloc_data->dma_buf, &ionflag);
	if (rc) {
		dev_err(cb_dev, "%s: dma_buf_get_flags failed: %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}
	if (!(ionflag & ION_FLAG_CACHED))
		alloc_data->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;

	/*
	 * Get the scatter-gather list.
	 * There is no info on whether this is a write buffer or a
	 * read buffer, hence the request is bi-directional
	 * to accommodate both read and write mappings.
	 */
	alloc_data->table = dma_buf_map_attachment(alloc_data->attach,
						   DMA_BIDIRECTIONAL);
	if (IS_ERR(alloc_data->table)) {
		rc = PTR_ERR(alloc_data->table);
		dev_err(cb_dev,
			"%s: Fail to map attachment, rc = %d\n",
			__func__, rc);
		goto detach_dma_buf;
	}

	/* physical address from mapping */
	if (!is_iova) {
		*addr = sg_phys(alloc_data->table->sgl);
		vaddr = msm_audio_ion_map_kernel((void *)dma_buf);
		if (IS_ERR_OR_NULL(vaddr)) {
			pr_err("%s: ION memory mapping for AUDIO failed\n",
			       __func__);
			rc = -ENOMEM;
			goto detach_dma_buf;
		}
		alloc_data->vaddr = vaddr;
	} else {
		*addr = MSM_AUDIO_ION_PHYS_ADDR(alloc_data);
	}

	msm_audio_ion_add_allocation(&msm_audio_ion_data,
				     alloc_data);
	return rc;

detach_dma_buf:
	dma_buf_detach(alloc_data->dma_buf,
		       alloc_data->attach);
free_alloc_data:
	kfree(alloc_data);
	alloc_data = NULL;
	return rc;
}

static int msm_audio_dma_buf_unmap(struct dma_buf *dma_buf, bool cma_mem)
{
	int rc = 0;
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct list_head *ptr, *next;
	struct device *cb_dev;
	bool found = false;

	if (cma_mem)
		cb_dev = msm_audio_ion_data.cb_cma_dev;
	else
		cb_dev = msm_audio_ion_data.cb_dev;

	/*
	 * Though list_for_each_safe is delete safe, the lock
	 * should be explicitly acquired to avoid a race condition
	 * with elements being added to the list.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_safe(ptr, next,
			   &(msm_audio_ion_data.alloc_list)) {
		alloc_data = list_entry(ptr, struct msm_audio_alloc_data,
					list);
		if (alloc_data->dma_buf == dma_buf) {
			found = true;
			dma_buf_unmap_attachment(alloc_data->attach,
						 alloc_data->table,
						 DMA_BIDIRECTIONAL);
			dma_buf_detach(alloc_data->dma_buf,
				       alloc_data->attach);
			dma_buf_put(alloc_data->dma_buf);
			list_del(&(alloc_data->list));
			kfree(alloc_data);
			alloc_data = NULL;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		dev_err(cb_dev,
			"%s: cannot find allocation, dma_buf %pK",
			__func__, dma_buf);
		rc = -EINVAL;
	}

	return rc;
}

static int msm_audio_ion_get_phys(struct dma_buf *dma_buf,
				  dma_addr_t *addr, size_t *len, bool is_iova)
{
	int rc = 0;

	rc = msm_audio_dma_buf_map(dma_buf, addr, len, is_iova, false);
	if (rc) {
		pr_err("%s: failed to map DMA buf, err = %d\n",
		       __func__, rc);
		goto err;
	}
	if (msm_audio_ion_data.smmu_enabled && is_iova) {
		/* Append the SMMU SID information to the IOVA address */
		*addr |= msm_audio_ion_data.smmu_sid_bits;
	}
	pr_debug("phys=%pK, len=%zd, rc=%d\n", &(*addr), *len, rc);
err:
	return rc;
}
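
/*
 * Illustrative sketch, not part of the driver: with SMMU enabled, the stream
 * ID parsed from the "iommus" DT entry is packed into the upper 32 bits of
 * the address handed back to callers (see MSM_AUDIO_SMMU_SID_OFFSET), e.g.:
 *
 *	u64 sid_bits = smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;	// SID in bits [63:32]
 *	dma_addr_t full_addr = iova | sid_bits;			// what callers receive
 *	// upper_32_bits(full_addr) yields the SID, lower_32_bits() the IOVA.
 */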

int msm_audio_ion_get_smmu_info(struct device **cb_dev,
				u64 *smmu_sid)
{
	if (!cb_dev || !smmu_sid) {
		pr_err("%s: Invalid params\n",
		       __func__);
		return -EINVAL;
	}
	if (!msm_audio_ion_data.cb_dev ||
	    !msm_audio_ion_data.smmu_sid_bits) {
		pr_err("%s: Params not initialized\n",
		       __func__);
		return -EINVAL;
	}

	*cb_dev = msm_audio_ion_data.cb_dev;
	*smmu_sid = msm_audio_ion_data.smmu_sid_bits;

	return 0;
}

static int msm_audio_ion_unmap_kernel(struct dma_buf *dma_buf)
{
	int rc = 0;
	void *vaddr = NULL;
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	/*
	 * TBD: remove the below section once new API
	 * for unmapping kernel virtual address is available.
	 */
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == dma_buf) {
			vaddr = alloc_data->vaddr;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!vaddr) {
		dev_err(cb_dev,
			"%s: cannot find allocation for dma_buf %pK",
			__func__, dma_buf);
		rc = -EINVAL;
		goto err;
	}

	dma_buf_vunmap(dma_buf, vaddr);

	rc = dma_buf_end_cpu_access(dma_buf, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(cb_dev, "%s: kmap dma_buf_end_cpu_access fail\n",
			__func__);
		goto err;
	}

err:
	return rc;
}

static int msm_audio_ion_map_buf(struct dma_buf *dma_buf, dma_addr_t *paddr,
				 size_t *plen, void **vaddr)
{
	int rc = 0;
	bool is_iova = true;

	if (!dma_buf || !paddr || !vaddr || !plen) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	rc = msm_audio_ion_get_phys(dma_buf, paddr, plen, is_iova);
	if (rc) {
		pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
		       __func__, rc);
		dma_buf_put(dma_buf);
		goto err;
	}

	*vaddr = msm_audio_ion_map_kernel(dma_buf);
	if (IS_ERR_OR_NULL(*vaddr)) {
		pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
		rc = -ENOMEM;
		msm_audio_dma_buf_unmap(dma_buf, false);
		goto err;
	}

err:
	return rc;
}

static u32 msm_audio_ion_get_smmu_sid_mode32(void)
{
	if (msm_audio_ion_data.smmu_enabled)
		return upper_32_bits(msm_audio_ion_data.smmu_sid_bits);
	else
		return 0;
}

/**
 * msm_audio_ion_alloc -
 *	Allocates ION memory for the given client name
 *
 * @dma_buf: dma_buf for the ION memory
 * @bufsz: buffer size
 * @paddr: Physical address to be assigned with allocated region
 * @plen: length of allocated region to be assigned
 * @vaddr: virtual address to be assigned
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_alloc(struct dma_buf **dma_buf, size_t bufsz,
			dma_addr_t *paddr, size_t *plen, void **vaddr)
{
	int rc = -EINVAL;
	unsigned long err_ion_ptr = 0;

	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
		pr_debug("%s: probe is not done, deferred\n", __func__);
		return -EPROBE_DEFER;
	}
	if (!dma_buf || !paddr || !vaddr || !bufsz || !plen) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	if (msm_audio_ion_data.smmu_enabled == true) {
		pr_debug("%s: system heap is used\n", __func__);
		*dma_buf = ion_alloc(bufsz, ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
	} else {
		pr_debug("%s: audio heap is used\n", __func__);
		*dma_buf = ion_alloc(bufsz, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	}
	if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
		if (IS_ERR((void *)(*dma_buf)))
			err_ion_ptr = PTR_ERR((int *)(*dma_buf));
		pr_err("%s: ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
		       __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
		rc = -ENOMEM;
		goto err;
	}

	rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
	if (rc) {
		pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
		goto err;
	}
	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
		 *vaddr, bufsz);

	memset(*vaddr, 0, bufsz);
	/* TODO: clean up the dma_buf on error? */
err:
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_alloc);
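
/*
 * Illustrative caller sketch, not taken from this file; the client-side
 * variables below are hypothetical. A typical in-kernel user allocates a
 * buffer, uses paddr/vaddr, and releases it with msm_audio_ion_free():
 *
 *	struct dma_buf *dma_buf = NULL;
 *	dma_addr_t paddr = 0;
 *	size_t plen = 0;
 *	void *vaddr = NULL;
 *	int rc;
 *
 *	rc = msm_audio_ion_alloc(&dma_buf, SZ_4K, &paddr, &plen, &vaddr);
 *	if (rc == -EPROBE_DEFER)
 *		return rc;	// ION device not probed yet, retry later
 *	if (!rc)
 *		msm_audio_ion_free(dma_buf);
 */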

int msm_audio_ion_phys_free(void *handle,
			    dma_addr_t *paddr,
			    size_t *pa_len,
			    u8 assign_type,
			    int id,
			    int key)
{
	handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_free);

int msm_audio_ion_phys_assign(void **handle, int fd,
			      dma_addr_t *paddr, size_t *pa_len,
			      u8 assign_type, int id)
{
	*handle = NULL;
	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_phys_assign);

bool msm_audio_is_hypervisor_supported(void)
{
	return !(msm_audio_ion_data.is_non_hypervisor);
}
EXPORT_SYMBOL(msm_audio_is_hypervisor_supported);

/**
 * msm_audio_ion_dma_map -
 *	Maps a given DMA buffer's physical address into IOVA space
 *
 * @phys_addr: Physical address of DMA buffer to be mapped
 * @iova_base: IOVA address of memory mapped DMA buffer
 * @size: buffer size
 * @dir: DMA direction
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_dma_map(dma_addr_t *phys_addr, dma_addr_t *iova_base,
			  u32 size, enum dma_data_direction dir)
{
	dma_addr_t iova;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	if (!phys_addr || !iova_base || !size)
		return -EINVAL;

	iova = dma_map_resource(cb_dev, *phys_addr, size,
				dir, 0);
	if (dma_mapping_error(cb_dev, iova)) {
		pr_err("%s: dma_mapping_error\n", __func__);
		return -EIO;
	}
	pr_debug("%s: dma_mapping_success iova:0x%lx\n", __func__,
		 (unsigned long)iova);
	if (msm_audio_ion_data.smmu_enabled)
		/* Append the SMMU SID information to the IOVA address */
		iova |= msm_audio_ion_data.smmu_sid_bits;

	*iova_base = iova;

	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_dma_map);

void msm_audio_fd_list_debug(void)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		pr_debug("%s fd %d handle %pK phy. addr %pK\n", __func__,
			 msm_audio_fd_data->fd, msm_audio_fd_data->handle,
			 (void *)msm_audio_fd_data->paddr);
	}
}

void msm_audio_update_fd_list(struct msm_audio_fd_data *msm_audio_fd_data)
{
	struct msm_audio_fd_data *msm_audio_fd_data1 = NULL;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data1,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data1->fd == msm_audio_fd_data->fd) {
			pr_err("%s fd already present, not updating the list",
			       __func__);
			mutex_unlock(&(msm_audio_ion_data.list_mutex));
			return;
		}
	}
	list_add_tail(&msm_audio_fd_data->list, &msm_audio_ion_data.fd_list);
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

void msm_audio_delete_fd_entry(void *handle)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	struct list_head *ptr, *next;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_safe(ptr, next,
			   &msm_audio_ion_data.fd_list) {
		msm_audio_fd_data = list_entry(ptr, struct msm_audio_fd_data,
					       list);
		if (msm_audio_fd_data->handle == handle) {
			pr_debug("%s deleting handle %pK entry from list\n",
				 __func__, handle);
			list_del(&(msm_audio_fd_data->list));
			kfree(msm_audio_fd_data);
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

int msm_audio_get_phy_addr(int fd, dma_addr_t *paddr)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	int status = -EINVAL;

	if (!paddr) {
		pr_err("%s Invalid paddr param status %d\n", __func__, status);
		return status;
	}
	pr_debug("%s, fd %d\n", __func__, fd);
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data->fd == fd) {
			*paddr = msm_audio_fd_data->paddr;
			status = 0;
			pr_debug("%s Found fd %d paddr %pK\n",
				 __func__, fd, paddr);
			mutex_unlock(&(msm_audio_ion_data.list_mutex));
			return status;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
	return status;
}
EXPORT_SYMBOL(msm_audio_get_phy_addr);

void msm_audio_get_handle(int fd, void **handle)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	pr_debug("%s fd %d\n", __func__, fd);
	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		if (msm_audio_fd_data->fd == fd) {
			*handle = (struct dma_buf *)msm_audio_fd_data->handle;
			pr_debug("%s handle %pK\n", __func__, *handle);
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));
}

/**
 * msm_audio_ion_import -
 *	Imports an ION buffer with the given file descriptor
 *
 * @dma_buf: dma_buf for the ION memory
 * @fd: file descriptor for the ION memory
 * @ionflag: flags associated with ION buffer
 * @bufsz: buffer size
 * @paddr: Physical address to be assigned with allocated region
 * @plen: length of allocated region to be assigned
 * @vaddr: virtual address to be assigned
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_import(struct dma_buf **dma_buf, int fd,
			 unsigned long *ionflag, size_t bufsz,
			 dma_addr_t *paddr, size_t *plen, void **vaddr)
{
	int rc = 0;

	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
		pr_debug("%s: probe is not done, deferred\n", __func__);
		return -EPROBE_DEFER;
	}

	if (!dma_buf || !paddr || !vaddr || !plen) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* bufsz should be 0 and fd shouldn't be 0 as of now */
	*dma_buf = dma_buf_get(fd);
	pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
	if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
		pr_err("%s: dma_buf_get failed\n", __func__);
		rc = -EINVAL;
		goto err;
	}

	if (ionflag != NULL) {
		rc = dma_buf_get_flags(*dma_buf, ionflag);
		if (rc) {
			pr_err("%s: could not get flags for the dma_buf\n",
			       __func__);
			goto err_ion_flag;
		}
	}

	rc = msm_audio_ion_map_buf(*dma_buf, paddr, plen, vaddr);
	if (rc) {
		pr_err("%s: failed to map ION buf, rc = %d\n", __func__, rc);
		goto err;
	}
	pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
		 *vaddr, bufsz);

	return 0;

err_ion_flag:
	dma_buf_put(*dma_buf);
err:
	*dma_buf = NULL;
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_import);
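
/*
 * Illustrative caller sketch, not taken from this file; "fd" is a dma-buf
 * file descriptor received from userspace and the surrounding code is
 * hypothetical:
 *
 *	struct dma_buf *dma_buf = NULL;
 *	unsigned long ionflag = 0;
 *	dma_addr_t paddr = 0;
 *	size_t plen = 0;
 *	void *vaddr = NULL;
 *
 *	if (!msm_audio_ion_import(&dma_buf, fd, &ionflag, 0,
 *				  &paddr, &plen, &vaddr)) {
 *		// ... use paddr/vaddr, then release with msm_audio_ion_free(dma_buf)
 *	}
 */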

/**
 * msm_audio_ion_import_cma -
 *	Imports an ION buffer with the given file descriptor
 *
 * @dma_buf: dma_buf for the ION memory
 * @fd: file descriptor for the ION memory
 * @ionflag: flags associated with ION buffer
 * @bufsz: buffer size
 * @paddr: Physical address to be assigned with allocated region
 * @plen: length of allocated region to be assigned
 * @vaddr: virtual address to be assigned
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_import_cma(struct dma_buf **dma_buf, int fd,
			     unsigned long *ionflag, size_t bufsz,
			     dma_addr_t *paddr, size_t *plen, void **vaddr)
{
	int rc = 0;

	if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
		pr_debug("%s: probe is not done, deferred\n", __func__);
		return -EPROBE_DEFER;
	}

	if (!dma_buf || !paddr || !vaddr || !plen ||
	    !msm_audio_ion_data.cb_cma_dev) {
		pr_err("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	/* bufsz should be 0 and fd shouldn't be 0 as of now */
	*dma_buf = dma_buf_get(fd);
	pr_debug("%s: dma_buf =%pK, fd=%d\n", __func__, *dma_buf, fd);
	if (IS_ERR_OR_NULL((void *)(*dma_buf))) {
		pr_err("%s: dma_buf_get failed\n", __func__);
		rc = -EINVAL;
		goto err;
	}

	if (ionflag != NULL) {
		rc = dma_buf_get_flags(*dma_buf, ionflag);
		if (rc) {
			pr_err("%s: could not get flags for the dma_buf\n",
			       __func__);
			goto err_ion_flag;
		}
	}

	msm_audio_dma_buf_map(*dma_buf, paddr, plen, true, true);

	return 0;

err_ion_flag:
	dma_buf_put(*dma_buf);
err:
	*dma_buf = NULL;
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_import_cma);

/**
 * msm_audio_ion_free -
 *	Frees ION memory for the given client and handle
 *
 * @dma_buf: dma_buf for the ION memory
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_free(struct dma_buf *dma_buf)
{
	int ret = 0;

	if (!dma_buf) {
		pr_err("%s: dma_buf invalid\n", __func__);
		return -EINVAL;
	}

	ret = msm_audio_ion_unmap_kernel(dma_buf);
	if (ret)
		return ret;

	msm_audio_dma_buf_unmap(dma_buf, false);

	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_free);

/**
 * msm_audio_ion_crash_handler -
 *	handles cleanup after userspace crashes.
 *
 * To be called from machine driver.
 */
void msm_audio_ion_crash_handler(void)
{
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;
	struct list_head *ptr, *next;
	void *handle = NULL;

	pr_debug("Inside %s\n", __func__);
	list_for_each_entry(msm_audio_fd_data,
			    &msm_audio_ion_data.fd_list, list) {
		handle = msm_audio_fd_data->handle;
		msm_audio_ion_free(handle);
	}
	list_for_each_safe(ptr, next,
			   &msm_audio_ion_data.fd_list) {
		msm_audio_fd_data = list_entry(ptr, struct msm_audio_fd_data,
					       list);
		list_del(&(msm_audio_fd_data->list));
		kfree(msm_audio_fd_data);
	}
}
EXPORT_SYMBOL(msm_audio_ion_crash_handler);

/**
 * msm_audio_ion_free_cma -
 *	Frees ION memory for the given client and handle
 *
 * @dma_buf: dma_buf for the ION memory
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_free_cma(struct dma_buf *dma_buf)
{
	if (!dma_buf) {
		pr_err("%s: dma_buf invalid\n", __func__);
		return -EINVAL;
	}

	msm_audio_dma_buf_unmap(dma_buf, true);

	return 0;
}
EXPORT_SYMBOL(msm_audio_ion_free_cma);

/**
 * msm_audio_ion_mmap -
 *	Audio ION memory map
 *
 * @abuff: audio buf pointer
 * @vma: virtual mem area
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_mmap(struct audio_buffer *abuff,
		       struct vm_area_struct *vma)
{
	struct msm_audio_alloc_data *alloc_data = NULL;
	struct sg_table *table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	unsigned int i;
	struct page *page;
	int ret = 0;
	bool found = false;
	struct device *cb_dev = msm_audio_ion_data.cb_dev;

	mutex_lock(&(msm_audio_ion_data.list_mutex));
	list_for_each_entry(alloc_data, &(msm_audio_ion_data.alloc_list),
			    list) {
		if (alloc_data->dma_buf == abuff->dma_buf) {
			found = true;
			table = alloc_data->table;
			break;
		}
	}
	mutex_unlock(&(msm_audio_ion_data.list_mutex));

	if (!found) {
		dev_err(cb_dev,
			"%s: cannot find allocation, dma_buf %pK",
			__func__, abuff->dma_buf);
		return -EINVAL;
	}

	/* uncached */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/*
	 * We need to check if a page is associated with this sg list because:
	 * If the allocation came from a carveout we currently don't have
	 * pages associated with carved out memory. This might change in the
	 * future and we can remove this check and the else statement.
	 */
	page = sg_page(table->sgl);
	if (page) {
		pr_debug("%s: page is NOT null\n", __func__);
		for_each_sg(table->sgl, sg, table->nents, i) {
			unsigned long remainder = vma->vm_end - addr;
			unsigned long len = sg->length;

			page = sg_page(sg);

			if (offset >= len) {
				offset -= len;
				continue;
			} else if (offset) {
				page += offset / PAGE_SIZE;
				len -= offset;
				offset = 0;
			}
			len = min(len, remainder);
			pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%lu\n",
				 vma, (unsigned int)addr, len,
				 (unsigned int)vma->vm_start,
				 (unsigned int)vma->vm_end,
				 (unsigned long)pgprot_val(vma->vm_page_prot));
			remap_pfn_range(vma, addr, page_to_pfn(page), len,
					vma->vm_page_prot);
			addr += len;
			if (addr >= vma->vm_end)
				return 0;
		}
	} else {
		pr_debug("%s: page is NULL\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(msm_audio_ion_mmap);

/**
 * msm_audio_ion_cache_operations -
 *	Cache operations on cached Audio ION buffers
 *
 * @abuff: audio buf pointer
 * @cache_op: cache operation to be performed
 *
 * Returns 0 on success or error on failure
 */
int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op)
{
	unsigned long ionflag = 0;
	int rc = 0;

	if (!abuff) {
		pr_err("%s: Invalid params: %pK\n", __func__, abuff);
		return -EINVAL;
	}
	rc = dma_buf_get_flags(abuff->dma_buf, &ionflag);
	if (rc) {
		pr_err("%s: dma_buf_get_flags failed: %d\n", __func__, rc);
		goto cache_op_failed;
	}

	/* Has to be CACHED */
	if (ionflag & ION_FLAG_CACHED) {
		/* MSM_AUDIO_ION_INV_CACHES or MSM_AUDIO_ION_CLEAN_CACHES */
		switch (cache_op) {
		case MSM_AUDIO_ION_INV_CACHES:
		case MSM_AUDIO_ION_CLEAN_CACHES:
			dma_buf_begin_cpu_access(abuff->dma_buf,
						 DMA_BIDIRECTIONAL);
			dma_buf_end_cpu_access(abuff->dma_buf,
					       DMA_BIDIRECTIONAL);
			break;
		default:
			pr_err("%s: Invalid cache operation %d\n",
			       __func__, cache_op);
		}
	} else {
		pr_err("%s: Cache ops called on uncached buffer: %pK\n",
		       __func__, abuff->dma_buf);
		rc = -EINVAL;
	}

cache_op_failed:
	return rc;
}
EXPORT_SYMBOL(msm_audio_ion_cache_operations);

/**
 * msm_audio_populate_upper_32_bits -
 *	retrieve upper 32 bits of a 64-bit address
 *
 * @pa: 64bit physical address
 */
u32 msm_audio_populate_upper_32_bits(dma_addr_t pa)
{
	if (sizeof(dma_addr_t) == sizeof(u32))
		return msm_audio_ion_get_smmu_sid_mode32();
	else
		return upper_32_bits(pa);
}
EXPORT_SYMBOL(msm_audio_populate_upper_32_bits);

static int msm_audio_ion_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct msm_audio_ion_private *ion_data = container_of(inode->i_cdev,
						struct msm_audio_ion_private,
						cdev);
	struct device *dev = ion_data->chardev;

	pr_debug("Inside %s\n", __func__);
	get_device(dev);
	return ret;
}

static int msm_audio_ion_release(struct inode *inode, struct file *file)
{
	struct msm_audio_ion_private *ion_data = container_of(inode->i_cdev,
						struct msm_audio_ion_private,
						cdev);
	struct device *dev = ion_data->chardev;

	pr_debug("Inside %s\n", __func__);
	put_device(dev);
	return 0;
}

static long msm_audio_ion_ioctl(struct file *file, unsigned int ioctl_num,
				unsigned long __user ioctl_param)
{
	void *mem_handle;
	dma_addr_t paddr;
	size_t pa_len = 0;
	void *vaddr;
	int ret = 0;
	struct msm_audio_fd_data *msm_audio_fd_data = NULL;

	pr_debug("%s ioctl num %u\n", __func__, ioctl_num);
	switch (ioctl_num) {
	case IOCTL_MAP_PHYS_ADDR:
		msm_audio_fd_data = kzalloc((sizeof(struct msm_audio_fd_data)),
					    GFP_KERNEL);
		if (!msm_audio_fd_data) {
			ret = -ENOMEM;
			pr_err("%s : Out of memory ret %d\n", __func__, ret);
			return ret;
		}
		ret = msm_audio_ion_import((struct dma_buf **)&mem_handle,
					   (int)ioctl_param,
					   NULL, 0, &paddr, &pa_len, &vaddr);
		if (ret < 0) {
			pr_err("%s Memory map Failed %d\n", __func__, ret);
			kfree(msm_audio_fd_data);
			return ret;
		}
		msm_audio_fd_data->fd = (int)ioctl_param;
		msm_audio_fd_data->handle = mem_handle;
		msm_audio_fd_data->paddr = paddr;
		msm_audio_update_fd_list(msm_audio_fd_data);
		break;
	case IOCTL_UNMAP_PHYS_ADDR:
		msm_audio_get_handle((int)ioctl_param, &mem_handle);
		ret = msm_audio_ion_free(mem_handle);
		if (ret < 0) {
			pr_err("%s Ion free failed %d\n", __func__, ret);
			return ret;
		}
		msm_audio_delete_fd_entry(mem_handle);
		break;
	default:
		pr_err("%s Entered default. Invalid ioctl num %u",
		       __func__, ioctl_num);
		ret = -EINVAL;
		break;
	}
	return ret;
}
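
/*
 * Illustrative userspace sketch, not taken from this file: mapping and
 * unmapping a dma-buf fd through the character device registered below.
 * The "/dev/msm_audio_ion" path is an assumption derived from
 * MSM_AUDIO_ION_DRIVER_NAME.
 *
 *	int ion_dev = open("/dev/msm_audio_ion", O_RDWR);
 *	int rc = ioctl(ion_dev, IOCTL_MAP_PHYS_ADDR, dmabuf_fd);
 *	// ... audio use case runs ...
 *	rc = ioctl(ion_dev, IOCTL_UNMAP_PHYS_ADDR, dmabuf_fd);
 *	close(ion_dev);
 */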

static int msm_audio_smmu_init(struct device *dev)
{
	INIT_LIST_HEAD(&msm_audio_ion_data.alloc_list);
	mutex_init(&(msm_audio_ion_data.list_mutex));
	return 0;
}

static const struct of_device_id msm_audio_ion_dt_match[] = {
	{ .compatible = "qcom,msm-audio-ion" },
	{ .compatible = "qcom,msm-audio-ion-cma" },
	{ }
};
MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);

static const struct file_operations msm_audio_ion_fops = {
	.owner = THIS_MODULE,
	.open = msm_audio_ion_open,
	.release = msm_audio_ion_release,
	.unlocked_ioctl = msm_audio_ion_ioctl,
};

static int msm_audio_ion_reg_chrdev(struct msm_audio_ion_private *ion_data)
{
	int ret = 0;

	ret = alloc_chrdev_region(&ion_data->ion_major, 0,
				  MINOR_NUMBER_COUNT, MSM_AUDIO_ION_DRIVER_NAME);
	if (ret < 0) {
		pr_err("%s alloc_chrdev_region failed ret : %d\n",
		       __func__, ret);
		return ret;
	}
	pr_debug("%s major number %d", __func__, MAJOR(ion_data->ion_major));

	ion_data->ion_class = class_create(THIS_MODULE,
					   MSM_AUDIO_ION_DRIVER_NAME);
	if (IS_ERR(ion_data->ion_class)) {
		ret = PTR_ERR(ion_data->ion_class);
		pr_err("%s class create failed. ret : %d", __func__, ret);
		goto err_class;
	}

	ion_data->chardev = device_create(ion_data->ion_class, NULL,
					  ion_data->ion_major, NULL,
					  MSM_AUDIO_ION_DRIVER_NAME);
	if (IS_ERR(ion_data->chardev)) {
		ret = PTR_ERR(ion_data->chardev);
		pr_err("%s device create failed ret : %d\n", __func__, ret);
		goto err_device;
	}

	cdev_init(&ion_data->cdev, &msm_audio_ion_fops);
	ret = cdev_add(&ion_data->cdev, ion_data->ion_major, 1);
	if (ret) {
		pr_err("%s cdev add failed, ret : %d\n", __func__, ret);
		goto err_cdev;
	}
	return ret;

err_cdev:
	device_destroy(ion_data->ion_class, ion_data->ion_major);
err_device:
	class_destroy(ion_data->ion_class);
err_class:
	unregister_chrdev_region(ion_data->ion_major, MINOR_NUMBER_COUNT);
	return ret;
}

static int msm_audio_ion_unreg_chrdev(struct msm_audio_ion_private *ion_data)
{
	cdev_del(&ion_data->cdev);
	device_destroy(ion_data->ion_class, ion_data->ion_major);
	class_destroy(ion_data->ion_class);
	unregister_chrdev_region(ion_data->ion_major, MINOR_NUMBER_COUNT);
	return 0;
}

static int msm_audio_ion_probe(struct platform_device *pdev)
{
	int rc = 0;
	u64 smmu_sid = 0;
	u64 smmu_sid_mask = 0;
	const char *msm_audio_ion_dt = "qcom,smmu-enabled";
	const char *msm_audio_ion_non_hyp = "qcom,non-hyp-assign";
	const char *msm_audio_ion_smmu = "qcom,smmu-version";
	const char *msm_audio_ion_smmu_sid_mask = "qcom,smmu-sid-mask";
	bool smmu_enabled;
	bool is_non_hypervisor_en;
	struct device *dev = &pdev->dev;
	struct of_phandle_args iommuspec;
#ifndef CONFIG_SPF_CORE
	enum apr_subsys_state q6_state;
#endif

	dev_err(dev, "%s: msm_audio_ion_probe\n", __func__);
	if (dev->of_node == NULL) {
		dev_err(dev,
			"%s: device tree is not found\n",
			__func__);
		msm_audio_ion_data.smmu_enabled = 0;
		return 0;
	}

	is_non_hypervisor_en = of_property_read_bool(dev->of_node,
						     msm_audio_ion_non_hyp);
	msm_audio_ion_data.is_non_hypervisor = is_non_hypervisor_en;

	if (of_device_is_compatible(dev->of_node, "qcom,msm-audio-ion-cma")) {
		msm_audio_ion_data.cb_cma_dev = dev;
		return 0;
	}

	smmu_enabled = of_property_read_bool(dev->of_node,
					     msm_audio_ion_dt);
	msm_audio_ion_data.smmu_enabled = smmu_enabled;

	if (!smmu_enabled) {
		dev_dbg(dev, "%s: SMMU is Disabled\n", __func__);
		goto exit;
	}

#ifndef CONFIG_SPF_CORE
	q6_state = apr_get_q6_state();
	if (q6_state == APR_SUBSYS_DOWN) {
		dev_info(dev,
			 "deferring %s, adsp_state %d\n",
			 __func__, q6_state);
		return -EPROBE_DEFER;
	}
#endif
	dev_dbg(dev, "%s: adsp is ready\n", __func__);

	rc = of_property_read_u32(dev->of_node,
				  msm_audio_ion_smmu,
				  &msm_audio_ion_data.smmu_version);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu-version missing in DT node\n",
			__func__);
		return rc;
	}
	dev_dbg(dev, "%s: SMMU is Enabled. SMMU version is (%d)",
		__func__, msm_audio_ion_data.smmu_version);

	/* Get SMMU SID information from Devicetree */
	rc = of_property_read_u64(dev->of_node,
				  msm_audio_ion_smmu_sid_mask,
				  &smmu_sid_mask);
	if (rc) {
		dev_err(dev,
			"%s: qcom,smmu-sid-mask missing in DT node, using default\n",
			__func__);
		smmu_sid_mask = 0xFFFFFFFFFFFFFFFF;
	}

	rc = of_parse_phandle_with_args(dev->of_node, "iommus",
					"#iommu-cells", 0, &iommuspec);
	if (rc)
		dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
			__func__, rc);
	else
		smmu_sid = (iommuspec.args[0] & smmu_sid_mask);

	msm_audio_ion_data.smmu_sid_bits =
		smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;

	if (msm_audio_ion_data.smmu_version == 0x2) {
		rc = msm_audio_smmu_init(dev);
	} else {
		dev_err(dev, "%s: smmu version invalid %d\n",
			__func__, msm_audio_ion_data.smmu_version);
		rc = -EINVAL;
	}
	if (rc)
		dev_err(dev, "%s: smmu init failed, err = %d\n",
			__func__, rc);

exit:
	if (!rc)
		msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;

	msm_audio_ion_data.cb_dev = dev;
	INIT_LIST_HEAD(&msm_audio_ion_data.fd_list);
	rc = msm_audio_ion_reg_chrdev(&msm_audio_ion_data);
	if (rc) {
		pr_err("%s register char dev failed, rc : %d", __func__, rc);
		return rc;
	}
	return rc;
}

static int msm_audio_ion_remove(struct platform_device *pdev)
{
	struct device *audio_cb_dev;

	audio_cb_dev = msm_audio_ion_data.cb_dev;

	msm_audio_ion_data.smmu_enabled = 0;
	msm_audio_ion_data.device_status = 0;
	msm_audio_ion_unreg_chrdev(&msm_audio_ion_data);
	return 0;
}

static struct platform_driver msm_audio_ion_driver = {
	.driver = {
		.name = "msm-audio-ion",
		.owner = THIS_MODULE,
		.of_match_table = msm_audio_ion_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = msm_audio_ion_probe,
	.remove = msm_audio_ion_remove,
};

int __init msm_audio_ion_init(void)
{
	pr_debug("%s: msm_audio_ion_init called\n", __func__);
	return platform_driver_register(&msm_audio_ion_driver);
}

void msm_audio_ion_exit(void)
{
	platform_driver_unregister(&msm_audio_ion_driver);
}

module_init(msm_audio_ion_init);
module_exit(msm_audio_ion_exit);

MODULE_DESCRIPTION("MSM Audio ION module");
MODULE_LICENSE("GPL v2");