// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/ion_kernel.h>
#include <linux/msm_ion.h>
#include <dsp/msm_audio_ion.h>
#include <ipc/apr.h>
#include <dsp/msm_mdf.h>
#include <asm/dma-iommu.h>
#include <soc/qcom/secure_buffer.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <dsp/q6audio-v2.h>
#include <dsp/q6core.h>
#include <asm/cacheflush.h>

#define VMID_SSC_Q6 5
#define VMID_LPASS 6
#define VMID_MSS_MSA 15
#define VMID_CDSP 30

#define MSM_MDF_PROBED (1 << 0)
#define MSM_MDF_INITIALIZED (1 << 1)
#define MSM_MDF_MEM_ALLOCATED (1 << 2)
#define MSM_MDF_MEM_MAPPED (1 << 3)
#define MSM_MDF_MEM_PERMISSION (1 << 4) /* 0 - HLOS, 1 - Subsys */

/* TODO: Update IOVA range for subsys SMMUs */
#define MSM_MDF_IOVA_START 0x80000000
#define MSM_MDF_IOVA_LEN 0x800000

#define MSM_MDF_SMMU_SID_OFFSET 32

#define ADSP_STATE_READY_TIMEOUT_MS 3000

/* mem protection defines */
#define TZ_MPU_LOCK_NS_REGION 0x00000025
#define MEM_PROTECT_AC_PERM_READ 0x4
#define MEM_PROTECT_AC_PERM_WRITE 0x2

#define MSM_AUDIO_SMMU_SID_OFFSET 32

enum {
        SUBSYS_ADSP, /* Audio DSP must have index 0 */
        SUBSYS_SCC,  /* Sensor DSP */
        SUBSYS_MSS,  /* Modem DSP */
        SUBSYS_CDSP, /* Compute DSP */
        SUBSYS_MAX,
};

struct msm_mdf_dest_vm_and_perm_info {
        /* Destination VM defined by ACVirtualMachineId. */
        uint32_t dst_vm;
        /* Permissions of the IPA to be mapped to VM, bitwise OR of AC_PERM. */
        uint32_t dst_vm_perm;
        /* Destination of the VM-specific context information. */
        uint64_t ctx;
        /* Size of context buffer in bytes. */
        uint32_t ctx_size;
};

struct msm_mdf_protect_mem {
        uint64_t dma_start_address;
        uint64_t dma_end_address;
        struct msm_mdf_dest_vm_and_perm_info dest_info[SUBSYS_MAX];
        uint32_t dest_info_size;
};

struct msm_mdf_mem {
        struct device *dev;
        uint8_t device_status;
        uint32_t map_handle;
        struct dma_buf *dma_buf;
        dma_addr_t dma_addr;
        size_t size;
        void *va;
};

static struct msm_mdf_mem mdf_mem_data = {NULL,};

struct msm_mdf_smmu {
        bool enabled;
        char *subsys;
        int vmid;
        uint32_t proc_id;
        struct device *cb_dev;
        uint8_t device_status;
        uint64_t sid;
        struct dma_iommu_mapping *mapping;
        u64 pa;
        size_t pa_len;
};

static struct msm_mdf_smmu mdf_smmu_data[SUBSYS_MAX] = {
        {
                .subsys = "adsp",
                .vmid = VMID_LPASS,
        },
        {
                .subsys = "dsps",
                .vmid = VMID_SSC_Q6,
                .proc_id = AVS_MDF_SSC_PROC_ID,
        },
        {
                .subsys = "modem",
                .vmid = VMID_MSS_MSA,
                .proc_id = AVS_MDF_MDSP_PROC_ID,
        },
        {
                .subsys = "cdsp",
                .vmid = VMID_CDSP,
                .proc_id = AVS_MDF_CDSP_PROC_ID,
        },
};

static void *ssr_handle;

static inline uint64_t buf_page_start(uint64_t buf)
{
        uint64_t start = (uint64_t) buf & PAGE_MASK;

        return start;
}

static inline uint64_t buf_page_offset(uint64_t buf)
{
        uint64_t offset = (uint64_t) buf & (PAGE_SIZE - 1);

        return offset;
}

static inline int buf_num_pages(uint64_t buf, ssize_t len)
{
        uint64_t start = buf_page_start(buf) >> PAGE_SHIFT;
        uint64_t end = (((uint64_t) buf + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
        int npages = end - start + 1;

        return npages;
}

static inline uint64_t buf_page_size(uint32_t size)
{
        uint64_t sz = (size + (PAGE_SIZE - 1)) & PAGE_MASK;

        return sz > PAGE_SIZE ? sz : PAGE_SIZE;
}

static inline void *uint64_to_ptr(uint64_t addr)
{
        void *ptr = (void *)((uintptr_t)addr);

        return ptr;
}

static inline uint64_t ptr_to_uint64(void *ptr)
{
        uint64_t addr = (uint64_t)((uintptr_t)ptr);

        return addr;
}
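
/*
 * msm_mdf_dma_buf_map - map the MDF buffer into a subsystem's SMMU
 * context bank and record the resulting device address.
 *
 * For SMMU-enabled subsystems the buffer is mapped with
 * dma_map_single_attrs() on the context-bank device and the SMMU SID
 * is OR'ed into the upper bits of the IOVA; for SMMU-bypass
 * subsystems the DMA (physical) address is used directly.
 */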
static int msm_mdf_dma_buf_map(struct msm_mdf_mem *mem,
                               struct msm_mdf_smmu *smmu)
{
        int rc = 0;
        dma_addr_t pa = 0;

        if (!smmu)
                return -EINVAL;

        if (smmu->device_status & MSM_MDF_MEM_MAPPED)
                return 0;

        smmu->pa = 0;
        if (smmu->enabled) {
                if (smmu->cb_dev == NULL) {
                        pr_err("%s: cb device is not initialized\n",
                               __func__);
                        /* Retry if the LPASS cb device was not ready
                         * from audio ION during probing.
                         */
                        if (!strcmp("adsp", smmu->subsys)) {
                                rc = msm_audio_ion_get_smmu_info(&smmu->cb_dev,
                                                                 &smmu->sid);
                                if (rc) {
                                        pr_err("%s: msm_audio_ion_get_smmu_info failed, rc = %d\n",
                                               __func__, rc);
                                        goto err;
                                }
                        } else {
                                return -ENODEV;
                        }
                }

                pa = dma_map_single_attrs(smmu->cb_dev, mem->va,
                                          mem->size, DMA_BIDIRECTIONAL,
                                          DMA_ATTR_SKIP_CPU_SYNC);
                /* Check the address actually returned by the mapping;
                 * smmu->pa is still zero at this point.
                 */
                if (dma_mapping_error(smmu->cb_dev, pa)) {
                        rc = -ENOMEM;
                        pr_err("%s: failed to map single, rc = %d\n",
                               __func__, rc);
                        goto err;
                }
                smmu->pa |= pa;
                smmu->pa_len = mem->size;

                /* Append the SMMU SID information to the IOVA address */
                if (smmu->sid)
                        smmu->pa |= smmu->sid;
        } else {
                smmu->pa |= mem->dma_addr;
                smmu->pa_len = mem->size;
        }
        pr_debug("%s: pa=%pa, pa_len=%zu\n", __func__,
                 &smmu->pa, smmu->pa_len);

        smmu->device_status |= MSM_MDF_MEM_MAPPED;

        return 0;
err:
        return rc;
}
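
/*
 * msm_mdf_alloc_dma_buf - allocate the shared MDF memory region on the
 * mem-region device.
 *
 * The buffer is allocated with DMA_ATTR_NO_KERNEL_MAPPING, so the
 * usable kernel virtual address is derived from the physical address
 * rather than from the cookie returned by dma_alloc_attrs().
 */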
static int msm_mdf_alloc_dma_buf(struct msm_mdf_mem *mem)
{
        if (!mem)
                return -EINVAL;

        if (mem->device_status & MSM_MDF_MEM_ALLOCATED)
                return 0;

        if (mem->dev == NULL) {
                pr_err("%s: device is not initialized\n",
                       __func__);
                return -ENODEV;
        }

        mem->va = dma_alloc_attrs(mem->dev, mem->size,
                                  &mem->dma_addr, GFP_KERNEL,
                                  DMA_ATTR_NO_KERNEL_MAPPING);
        if (IS_ERR_OR_NULL(mem->va)) {
                pr_err("%s: failed to allocate dma memory\n",
                       __func__);
                return -ENOMEM;
        }
        mem->va = phys_to_virt(mem->dma_addr);

        mem->device_status |= MSM_MDF_MEM_ALLOCATED;

        return 0;
}

static int msm_mdf_free_dma_buf(struct msm_mdf_mem *mem)
{
        if (!mem)
                return -EINVAL;

        if (mem->dev == NULL) {
                pr_err("%s: device is not initialized\n",
                       __func__);
                return -ENODEV;
        }

        /*
         * dma_free_coherent(mem->dev, mem->size, mem->va,
         *                   mem->dma_addr);
         */
        mem->device_status &= ~MSM_MDF_MEM_ALLOCATED;

        return 0;
}

static int msm_mdf_dma_buf_unmap(struct msm_mdf_mem *mem,
                                 struct msm_mdf_smmu *smmu)
{
        if (!smmu)
                return -EINVAL;

        if (smmu->enabled) {
                if (smmu->cb_dev == NULL) {
                        pr_err("%s: cb device is not initialized\n",
                               __func__);
                        return -ENODEV;
                }
                /*
                 * if (smmu->pa && mem->size)
                 *         dma_unmap_single(smmu->cb_dev, smmu->pa,
                 *                          mem->size, DMA_BIDIRECTIONAL);
                 */
        }

        smmu->device_status &= ~MSM_MDF_MEM_MAPPED;

        return 0;
}
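
/*
 * msm_mdf_map_memory_to_subsys - advertise the MDF region to one DSP.
 *
 * The ADSP (the master DSP) maps the region itself through
 * q6core_map_mdf_memory_regions() and returns a map handle; that
 * handle is then reused to map the same region to the remote DSPs,
 * identified by proc_id, through q6core_map_mdf_shared_memory().
 */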
static int msm_mdf_map_memory_to_subsys(struct msm_mdf_mem *mem,
                                        struct msm_mdf_smmu *smmu)
{
        int rc = 0;

        if (!mem || !smmu)
                return -EINVAL;

        /* Map mdf shared memory to ADSP */
        if (!strcmp("adsp", smmu->subsys)) {
                rc = q6core_map_mdf_memory_regions((uint64_t *)&smmu->pa,
                                ADSP_MEMORY_MAP_MDF_SHMEM_4K_POOL,
                                (uint32_t *)&smmu->pa_len, 1,
                                &mem->map_handle);
                if (rc) {
                        pr_err("%s: q6core_map_mdf_memory_regions failed, rc = %d\n",
                               __func__, rc);
                }
        } else {
                if (mem->map_handle) {
                        /* Map mdf shared memory to remote DSPs */
                        rc = q6core_map_mdf_shared_memory(mem->map_handle,
                                        (uint64_t *)&smmu->pa, smmu->proc_id,
                                        (uint32_t *)&smmu->pa_len, 1);
                        if (rc) {
                                pr_err("%s: q6core_map_mdf_shared_memory failed, rc = %d\n",
                                       __func__, rc);
                        }
                }
        }

        return rc;
}

static void msm_mdf_unmap_memory_to_subsys(struct msm_mdf_mem *mem,
                                           struct msm_mdf_smmu *smmu)
{
        if (!mem || !smmu)
                return;

        if (!strcmp("adsp", smmu->subsys)) {
                if (mem->map_handle)
                        q6core_memory_unmap_regions(mem->map_handle);
        }
}
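
/*
 * msm_mdf_assign_memory_to_subsys - hand the MDF region over to the
 * subsystem VMs.
 *
 * Builds a destination list granting read/write permission to the four
 * subsystem VMIDs and issues the TZ_MPU_LOCK_NS_REGION SCM call so
 * TZ locks the region for those VMs. The descriptor is cache-flushed
 * first because scm_call2() consumes it by physical address.
 */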
static int msm_mdf_assign_memory_to_subsys(struct msm_mdf_mem *mem)
{
        int ret = 0, i;
        struct scm_desc desc = {0};
        struct msm_mdf_protect_mem *scm_buffer;
        uint32_t fnid;

        scm_buffer = kzalloc(sizeof(struct msm_mdf_protect_mem), GFP_KERNEL);
        if (!scm_buffer)
                return -ENOMEM;

        scm_buffer->dma_start_address = mem->dma_addr;
        scm_buffer->dma_end_address = mem->dma_addr + buf_page_size(mem->size);
        for (i = 0; i < SUBSYS_MAX; i++) {
                scm_buffer->dest_info[i].dst_vm = mdf_smmu_data[i].vmid;
                scm_buffer->dest_info[i].dst_vm_perm =
                        MEM_PROTECT_AC_PERM_READ | MEM_PROTECT_AC_PERM_WRITE;
                scm_buffer->dest_info[i].ctx = 0;
                scm_buffer->dest_info[i].ctx_size = 0;
        }
        scm_buffer->dest_info_size =
                sizeof(struct msm_mdf_dest_vm_and_perm_info) * SUBSYS_MAX;

        /* cache flush is required by scm_call2 */
        dmac_flush_range(scm_buffer, ((void *)scm_buffer) +
                         sizeof(struct msm_mdf_protect_mem));

        desc.args[0] = scm_buffer->dma_start_address;
        desc.args[1] = scm_buffer->dma_end_address;
        desc.args[2] = virt_to_phys(&(scm_buffer->dest_info[0]));
        desc.args[3] = scm_buffer->dest_info_size;
        desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_VAL, SCM_RO, SCM_VAL);

        fnid = SCM_SIP_FNID(SCM_SVC_MP, TZ_MPU_LOCK_NS_REGION);
        ret = scm_call2(fnid, &desc);
        if (ret < 0) {
                pr_err("%s: scm_call2 failed, ret %d scm_resp %llu\n",
                       __func__, ret, desc.ret[0]);
        }

        /* scm_buffer is no longer needed, free it */
        kfree(scm_buffer);
        return ret;
}

/**
 * msm_mdf_mem_init - Initialize the MDF memory pool and map the
 * memory to each subsystem.
 *
 * Returns 0 on success or an error code on failure.
 */
int msm_mdf_mem_init(void)
{
        int rc = 0, i, j;
        struct msm_mdf_mem *mem = &mdf_mem_data;
        struct msm_mdf_smmu *smmu;
        unsigned long timeout = jiffies +
                msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
        int adsp_ready = 0;

        if (!(mdf_mem_data.device_status & MSM_MDF_PROBED))
                return -ENODEV;

        if (mdf_mem_data.device_status & MSM_MDF_INITIALIZED)
                return 0;

        /* TODO: polling may not be needed, as the Q6 core state should
         * be checked during machine driver probing.
         */
        do {
                if (!q6core_is_adsp_ready()) {
                        pr_err("%s: ADSP Audio NOT Ready\n",
                               __func__);
                        /* ADSP will be coming up after subsystem restart
                         * and might not be fully up when control reaches
                         * here, so wait 50 ms before checking the ADSP
                         * state again.
                         */
                        msleep(50);
                } else {
                        pr_debug("%s: ADSP Audio Ready\n",
                                 __func__);
                        adsp_ready = 1;
                        break;
                }
        } while (time_after(timeout, jiffies));

        if (!adsp_ready) {
                pr_err("%s: timed out waiting for ADSP Audio\n",
                       __func__);
                return -ETIMEDOUT;
        }

        if (mem->device_status & MSM_MDF_MEM_ALLOCATED) {
                for (i = 0; i < SUBSYS_MAX; i++) {
                        smmu = &mdf_smmu_data[i];
                        rc = msm_mdf_dma_buf_map(mem, smmu);
                        if (rc) {
                                pr_err("%s: msm_mdf_dma_buf_map failed, rc = %d\n",
                                       __func__, rc);
                                goto err;
                        }
                }

                rc = msm_mdf_assign_memory_to_subsys(mem);
                if (rc) {
                        pr_err("%s: msm_mdf_assign_memory_to_subsys failed\n",
                               __func__);
                        goto err;
                }

                for (j = 0; j < SUBSYS_MAX; j++) {
                        smmu = &mdf_smmu_data[j];
                        rc = msm_mdf_map_memory_to_subsys(mem, smmu);
                        if (rc) {
                                pr_err("%s: msm_mdf_map_memory_to_subsys failed\n",
                                       __func__);
                                goto err;
                        }
                }
                mdf_mem_data.device_status |= MSM_MDF_INITIALIZED;
        }

        return 0;
err:
        return rc;
}
EXPORT_SYMBOL(msm_mdf_mem_init);
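
/**
 * msm_mdf_mem_deinit - Unmap the MDF memory from each subsystem and
 * release the resources set up by msm_mdf_mem_init().
 *
 * Returns 0 on success or -ENODEV if MDF was never initialized.
 */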
int msm_mdf_mem_deinit(void)
{
        int i;
        struct msm_mdf_mem *mem = &mdf_mem_data;
        struct msm_mdf_smmu *smmu;

        if (!(mdf_mem_data.device_status & MSM_MDF_INITIALIZED))
                return -ENODEV;

        for (i = SUBSYS_MAX - 1; i >= 0; i--) {
                smmu = &mdf_smmu_data[i];
                msm_mdf_unmap_memory_to_subsys(mem, smmu);
        }

        for (i = SUBSYS_MAX - 1; i >= 0; i--) {
                smmu = &mdf_smmu_data[i];
                msm_mdf_dma_buf_unmap(mem, smmu);
        }
        msm_mdf_free_dma_buf(mem);
        mem->device_status &= ~MSM_MDF_MEM_ALLOCATED;

        mdf_mem_data.device_status &= ~MSM_MDF_INITIALIZED;

        return 0;
}
EXPORT_SYMBOL(msm_mdf_mem_deinit);
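
/*
 * SSR notifier: tear the MDF memory down before the ADSP shuts down
 * and rebuild it once the ADSP has powered back up.
 */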
static int msm_mdf_restart_notifier_cb(struct notifier_block *this,
                                       unsigned long code,
                                       void *_cmd)
{
        static int boot_count = 3;

        /* During LPASS boot, HLOS receives the events:
         * SUBSYS_BEFORE_POWERUP
         * SUBSYS_PROXY_VOTE
         * SUBSYS_AFTER_POWERUP - needs to be skipped
         * SUBSYS_PROXY_UNVOTE
         */
        if (boot_count) {
                boot_count--;
                return NOTIFY_OK;
        }

        switch (code) {
        case SUBSYS_BEFORE_SHUTDOWN:
                pr_debug("Subsys Notify: Shutdown Started\n");
                /* Unmap and free memory upon restart event. */
                msm_mdf_mem_deinit();
                break;
        case SUBSYS_AFTER_SHUTDOWN:
                pr_debug("Subsys Notify: Shutdown Completed\n");
                break;
        case SUBSYS_BEFORE_POWERUP:
                pr_debug("Subsys Notify: Bootup Started\n");
                break;
        case SUBSYS_AFTER_POWERUP:
                pr_debug("Subsys Notify: Bootup Completed\n");
                /* Allocate and map memory after restart completes. */
                if (msm_mdf_mem_init())
                        pr_err("msm_mdf_mem_init failed\n");
                break;
        default:
                pr_err("Subsys Notify: General: %lu\n", code);
                break;
        }
        return NOTIFY_DONE;
}

static const struct of_device_id msm_mdf_match_table[] = {
        { .compatible = "qcom,msm-mdf", },
        { .compatible = "qcom,msm-mdf-mem-region", },
        { .compatible = "qcom,msm-mdf-cb", },
        {}
};
MODULE_DEVICE_TABLE(of, msm_mdf_match_table);
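
/*
 * msm_mdf_cb_probe - probe one SMMU context-bank device.
 *
 * Matches the DT "label" against the subsystem table, reads the
 * "qcom,smmu-enabled" flag and, for SMMU-enabled banks, derives the
 * SID from the "iommus" phandle masked by "qcom,smmu-sid-mask" and
 * shifts it into the upper 32 bits of the IOVA.
 */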
static int msm_mdf_cb_probe(struct device *dev)
{
        struct msm_mdf_smmu *smmu;
        u64 smmu_sid = 0;
        u64 smmu_sid_mask = 0;
        struct of_phandle_args iommuspec;
        const char *subsys;
        int rc = 0, i;

        subsys = of_get_property(dev->of_node, "label", NULL);
        if (!subsys) {
                dev_err(dev, "%s: could not get label\n",
                        __func__);
                return -EINVAL;
        }

        for (i = 0; i < SUBSYS_MAX; i++) {
                if (!mdf_smmu_data[i].subsys)
                        continue;
                if (!strcmp(subsys, mdf_smmu_data[i].subsys))
                        break;
        }
        if (i >= SUBSYS_MAX) {
                dev_err(dev, "%s: subsys %s not supported\n",
                        __func__, subsys);
                return -EINVAL;
        }
        smmu = &mdf_smmu_data[i];

        smmu->enabled = of_property_read_bool(dev->of_node,
                                              "qcom,smmu-enabled");

        dev_info(dev, "%s: SMMU is %s for %s\n", __func__,
                 (smmu->enabled) ? "enabled" : "disabled",
                 smmu->subsys);

        if (smmu->enabled) {
                /* Get SMMU SID information from Devicetree */
                rc = of_property_read_u64(dev->of_node,
                                          "qcom,smmu-sid-mask",
                                          &smmu_sid_mask);
                if (rc) {
                        dev_err(dev,
                                "%s: qcom,smmu-sid-mask missing in DT node, using default\n",
                                __func__);
                        smmu_sid_mask = 0xF;
                }

                rc = of_parse_phandle_with_args(dev->of_node, "iommus",
                                                "#iommu-cells", 0, &iommuspec);
                if (rc)
                        dev_err(dev, "%s: could not get smmu SID, ret = %d\n",
                                __func__, rc);
                else
                        smmu_sid = (iommuspec.args[0] & smmu_sid_mask);

                smmu->sid = smmu_sid << MSM_AUDIO_SMMU_SID_OFFSET;

                smmu->cb_dev = dev;
        }

        return 0;
}

static int msm_mdf_remove(struct platform_device *pdev)
{
        int rc = 0, i;

        for (i = 0; i < SUBSYS_MAX; i++) {
                if (!IS_ERR_OR_NULL(mdf_smmu_data[i].cb_dev))
                        arm_iommu_detach_device(mdf_smmu_data[i].cb_dev);
                if (!IS_ERR_OR_NULL(mdf_smmu_data[i].mapping))
                        arm_iommu_release_mapping(mdf_smmu_data[i].mapping);
                mdf_smmu_data[i].enabled = 0;
        }
        mdf_mem_data.device_status = 0;

        return rc;
}
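
/*
 * msm_mdf_probe - top-level probe, dispatched by compatible string:
 * "qcom,msm-mdf-cb" probes a context bank, "qcom,msm-mdf-mem-region"
 * allocates the shared region sized by "qcom,msm-mdf-mem-data-size",
 * and "qcom,msm-mdf" populates the child nodes. Probing defers until
 * the ADSP is up.
 */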
static int msm_mdf_probe(struct platform_device *pdev)
{
        int rc = 0;
        enum apr_subsys_state q6_state;
        struct device *dev = &pdev->dev;
        uint32_t mdf_mem_data_size = 0;

        /* TODO: MDF probing should have no dependency
         * on the ADSP Q6 state.
         */
        q6_state = apr_get_q6_state();
        if (q6_state == APR_SUBSYS_DOWN) {
                dev_dbg(dev, "deferring %s, adsp_state %d\n",
                        __func__, q6_state);
                rc = -EPROBE_DEFER;
                goto err;
        } else {
                dev_dbg(dev, "%s: adsp is ready\n", __func__);
        }

        if (of_device_is_compatible(dev->of_node,
                                    "qcom,msm-mdf-cb"))
                return msm_mdf_cb_probe(dev);

        if (of_device_is_compatible(dev->of_node,
                                    "qcom,msm-mdf-mem-region")) {
                mdf_mem_data.dev = dev;

                rc = of_property_read_u32(dev->of_node,
                                          "qcom,msm-mdf-mem-data-size",
                                          &mdf_mem_data_size);
                if (rc) {
                        dev_dbg(&pdev->dev, "MDF mem data size entry not found\n");
                        goto err;
                }

                mdf_mem_data.size = mdf_mem_data_size;
                dev_info(dev, "%s: mem region size %zu\n",
                         __func__, mdf_mem_data.size);
                msm_mdf_alloc_dma_buf(&mdf_mem_data);
                return 0;
        }

        rc = of_platform_populate(pdev->dev.of_node,
                                  msm_mdf_match_table,
                                  NULL, &pdev->dev);
        if (rc) {
                dev_err(&pdev->dev, "%s: failed to populate child nodes\n",
                        __func__);
                goto err;
        }
        mdf_mem_data.device_status |= MSM_MDF_PROBED;

err:
        return rc;
}

static struct platform_driver msm_mdf_driver = {
        .probe = msm_mdf_probe,
        .remove = msm_mdf_remove,
        .driver = {
                .name = "msm-mdf",
                .owner = THIS_MODULE,
                .of_match_table = msm_mdf_match_table,
        },
};

static struct notifier_block nb = {
        .priority = 0,
        .notifier_call = msm_mdf_restart_notifier_cb,
};

int __init msm_mdf_init(void)
{
        /* Only need to monitor SSR from the ADSP, which is the master
         * DSP managing the MDF memory.
         */
        ssr_handle = subsys_notif_register_notifier("adsp", &nb);

        return platform_driver_register(&msm_mdf_driver);
}

void __exit msm_mdf_exit(void)
{
        platform_driver_unregister(&msm_mdf_driver);

        if (ssr_handle)
                subsys_notif_unregister_notifier(ssr_handle, &nb);
}

MODULE_DESCRIPTION("MSM MDF Module");
MODULE_LICENSE("GPL v2");
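
/*
 * Usage sketch (illustrative, not part of this file): msm_mdf_init()
 * and msm_mdf_exit() are entry points presumably wired into module
 * init/exit elsewhere in the audio driver stack. A client would then
 * bring the shared region up once the ADSP is ready:
 *
 *      rc = msm_mdf_mem_init();        // map + assign to all DSPs
 *      if (rc)
 *              return rc;
 *      ...
 *      msm_mdf_mem_deinit();           // on teardown or ADSP SSR
 */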