// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>

#include "soc/qcom/secure_buffer.h"
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"

#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"
struct sde_smmu_domain {
	char *ctx_name;
	int domain;
};

static inline bool sde_smmu_is_valid_domain_type(
		struct sde_rot_data_type *mdata, int domain_type)
{
	return true;
}
static inline bool sde_smmu_is_valid_domain_condition(
		struct sde_rot_data_type *mdata,
		int domain_type,
		bool is_attach)
{
	if (is_attach) {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return false;
		else
			return true;
	} else {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return true;
		else
			return false;
	}
}
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!sde_smmu_is_valid_domain_type(mdata, domain))
		return NULL;

	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
			&mdata->sde_smmu[domain];
}
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	u32 i = 0;
	int rc = 0;
	const char *clock_name;
	u32 clock_rate = 0;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk < 0) {
		SDEROT_DBG("clocks are not defined\n");
		num_clk = 0;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kzalloc(&pdev->dev,
			sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
	if (num_clk && !mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		/* a zero rate marks an AHB clock, non-zero a core clock */
		if (!clock_rate)
			mp->clk_config[i].type = SDE_CLK_AHB;
		else
			mp->clk_config[i].type = SDE_CLK_PCLK;
	}

clk_err:
	return rc;
}
static int sde_smmu_clk_register(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		SDEROT_ERR("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			SDEROT_ERR("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
		bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_76_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
					VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
					false);
			goto end;
		}
	} else {
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}
/*
 * sde_smmu_attach()
 *
 * Associates each configured VA range with the corresponding SMMU context
 * bank device and enables its clocks, since the SMMU requires a vote before
 * use. The iommu attach itself happens only once, on the initial attach,
 * and the device is never detached afterwards because SMMU v2 uses a
 * feature called 'retention'.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			if (!sde_smmu->domain_attached &&
					sde_smmu_is_valid_domain_condition(mdata,
					i,
					true)) {
				rc = iommu_attach_device(
					sde_smmu->rot_domain, sde_smmu->dev);
				if (rc) {
					SDEROT_ERR(
						"iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
							false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			iommu_detach_device(sde_smmu->rot_domain,
					sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}
/*
 * sde_smmu_detach()
 *
 * Only disables the clocks; detaching the iommu-mapped VA range from the
 * device is not required, as explained in sde_smmu_attach().
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
					sde_smmu_is_valid_domain_condition(mdata,
					i, false)) {
				iommu_detach_device(sde_smmu->rot_domain,
						sde_smmu->dev);
				SDEROT_DBG("iommu domain[%i] detached\n", i);
				sde_smmu->domain_attached = false;
			} else {
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}
int sde_smmu_get_domain_id(u32 type)
{
	return type;
}
/*
 * sde_smmu_dma_buf_attach()
 *
 * Same as dma_buf_attach(), except that the device is taken from the
 * configured SMMU v2 context banks.
 */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, sde_smmu->dev);
}
/*
 * sde_smmu_map_dma_buf()
 *
 * Maps an existing buffer (described by a struct sg_table) into the SMMU
 * context bank device, from which the mapped virtual address and size are
 * returned. msm_map_dma_buf is deprecated with SMMU v2; dma_map_sg is used
 * instead.
 */
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
	unsigned long attrs = 0;

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return -EINVAL;
	}

	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
			attrs);
	if (!rc) {
		SDEROT_ERR("dma map sg failed\n");
		return -ENOMEM;
	}

	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}
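
/*
 * Illustrative import flow tying the three dma-buf helpers in this file
 * together. This is a minimal sketch, not a caller taken from the driver:
 * 'fd', 'rot_dev', the chosen domain and the DMA direction are assumptions
 * made for illustration only, and error handling is elided.
 *
 *	struct dma_buf *buf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *table;
 *	dma_addr_t iova;
 *	unsigned long size;
 *
 *	attach = sde_smmu_dma_buf_attach(buf, rot_dev,
 *			SDE_IOMMU_DOMAIN_ROT_UNSECURE);
 *	table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	if (!sde_smmu_map_dma_buf(buf, table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
 *			&iova, &size, DMA_TO_DEVICE)) {
 *		// rotator may now DMA within [iova, iova + size)
 *		sde_smmu_unmap_dma_buf(table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
 *				DMA_TO_DEVICE, buf);
 *	}
 */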
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return;
	}

	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);

int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0 &&
					mdata->iommu_attached) {
				rc = sde_smmu_detach(mdata);
				if (!rc)
					mdata->iommu_attached = false;
			}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
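
/*
 * Illustrative use of the reference-counted control above: every successful
 * sde_smmu_ctrl(1) must be balanced by an sde_smmu_ctrl(0). Attach happens
 * only on the 0 -> 1 transition and detach only on the 1 -> 0 transition.
 * A minimal sketch; the work in the middle is a placeholder:
 *
 *	int ret = sde_smmu_ctrl(1);
 *	if (ret < 0)
 *		return ret;
 *	... issue rotator work that touches mapped buffers ...
 *	sde_smmu_ctrl(0);
 */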
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we only get here when secure camera is disabled.
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * Keep iommu_attached true so that the driver does not
		 * attempt to attach while in the secure state.
		 */
	}

	mutex_unlock(&sde_smmu_ref_cnt_lock);
	return rc;
}
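
/*
 * Sketch of how a secure camera transition might drive the helper above.
 * The trigger and ordering here are assumptions for illustration; no caller
 * in this file performs this sequence:
 *
 *	mdata->sec_cam_en = 1;
 *	sde_smmu_secure_ctrl(0);	// detach the secure context bank
 *	... secure camera session runs ...
 *	mdata->sec_cam_en = 0;
 *	sde_smmu_secure_ctrl(1);	// re-attach once secure mode ends
 */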
/*
 * sde_smmu_device_create()
 * @dev: sde_mdp device
 *
 * With SMMU, each context bank is a separate child device of the sde
 * rotator. This creates platform devices for those SMMU child nodes so
 * that their probes run and handle the SMMU mapping and initialization.
 */
void sde_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_SECURE].domain_attached = true;
		} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain_attached = true;
		}
	}
}
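
/*
 * Illustrative device tree layout consumed by sde_smmu_device_create():
 * one child node per rotator context bank, matched by the compatible
 * strings defined at the top of this file. The node names and the SID
 * values in the iommus entries are placeholders, not from a real target:
 *
 *	mdss_rotator {
 *		smmu_rot_unsec: smmu_rot_unsec {
 *			compatible = "qcom,smmu_sde_rot_unsec";
 *			iommus = <&apps_smmu 0x1090 0x0>;
 *		};
 *		smmu_rot_sec: smmu_rot_sec {
 *			compatible = "qcom,smmu_sde_rot_sec";
 *			iommus = <&apps_smmu 0x1091 0x0>;
 *		};
 *	};
 */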
int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);

	return 0;
}
static int sde_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -EINVAL;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
			iova, flags);
	SDEROT_ERR("SMMU device:%s\n", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * Return an error (i.e. leave the fault unhandled) so that the
	 * smmu driver also dumps its own debug info.
	 */
	return rc;
}
static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};

static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};
/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each SMMU context acts as a separate device, and the context banks are
 * configured with a VA range.
 * Registers the clocks, as each context bank has its own clocks for which
 * a vote has to be cast every time before that context bank is used.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	u32 sid = 0;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initialized, probe defer\n");
		return -EPROBE_DEFER;
	}
	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}
	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
				1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d",
					smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	if (of_find_property(pdev->dev.of_node,
			"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
				sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
				mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = PTR_ERR(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}
	sde_smmu->dev = &pdev->dev;
	sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
	if (!sde_smmu->rot_domain) {
		dev_err(&pdev->dev, "iommu get domain failed\n");
		rc = -EINVAL;
		goto disable_power;
	}

	if (!dev->dma_parms) {
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms) {
			rc = -ENOMEM;
			goto disable_power;
		}
	}

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->rot_domain,
			sde_smmu_fault_handler, (void *)sde_smmu);

	sde_smmu_enable_power(sde_smmu, false);

	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
		smmu_domain.domain);
	return 0;

disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
			sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}
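
/*
 * Properties sde_smmu_probe() reads from a context bank node, shown here
 * as a hedged sketch: the clock names, rates, SID and supply phandle are
 * placeholders for illustration, not values from a real device tree:
 *
 *	iommus = <&apps_smmu 0x1090 0x0>;	// cell index 1 parsed as SID
 *	clock-names = "rot_ahb_clk", "rot_core_clk";
 *	clock-rate = <0 200000000>;	// 0 => SDE_CLK_AHB, else SDE_CLK_PCLK
 *	gdsc-mdss-supply = <&mdss_gdsc>;	// optional, enables vreg config
 */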
int sde_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct sde_smmu_client *sde_smmu;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		sde_smmu = sde_smmu_get_cb(i);
		if (!sde_smmu || !sde_smmu->dev ||
			(sde_smmu->dev != &pdev->dev))
			continue;

		sde_smmu->dev = NULL;
		sde_smmu->rot_domain = NULL;
		sde_smmu_enable_power(sde_smmu, false);
		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
				sde_smmu->mp.num_vreg, false);
		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
		sde_smmu->mp.vreg_config = NULL;
		sde_smmu->mp.num_vreg = 0;
	}
	return 0;
}
static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};

void sde_rotator_smmu_driver_register(void)
{
	platform_driver_register(&sde_smmu_driver);
}

void sde_rotator_smmu_driver_unregister(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
MODULE_IMPORT_NS(DMA_BUF);
#endif