sde_rotator_smmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>

#include <asm/dma-iommu.h>
#include "soc/qcom/secure_buffer.h"

#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"

#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"

struct sde_smmu_domain {
	char *ctx_name;
	int domain;
};
static inline bool sde_smmu_is_valid_domain_type(
		struct sde_rot_data_type *mdata, int domain_type)
{
	return true;
}

static inline bool sde_smmu_is_valid_domain_condition(
		struct sde_rot_data_type *mdata,
		int domain_type,
		bool is_attach)
{
	if (is_attach) {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return false;
		else
			return true;
	} else {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return true;
		else
			return false;
	}
}
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!sde_smmu_is_valid_domain_type(mdata, domain))
		return NULL;

	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
			&mdata->sde_smmu[domain];
}
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	u32 i = 0;
	int rc = 0;
	const char *clock_name;
	u32 clock_rate;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk < 0) {
		SDEROT_DBG("clocks are not defined\n");
		num_clk = 0;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kzalloc(&pdev->dev,
			sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
	if (num_clk && !mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		/* default to 0 (AHB) if no matching "clock-rate" entry */
		clock_rate = 0;
		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		if (!clock_rate)
			mp->clk_config[i].type = SDE_CLK_AHB;
		else
			mp->clk_config[i].type = SDE_CLK_PCLK;
	}

clk_err:
	return rc;
}
static int sde_smmu_clk_register(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		SDEROT_ERR("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			SDEROT_ERR("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
		bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_19_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
					VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
					false);
			goto end;
		}
	} else {
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}
/*
 * sde_smmu_attach()
 *
 * Associates each configured VA range with the corresponding SMMU context
 * bank device. Enables the clocks, since the SMMU requires a vote on them
 * before use. The iommu attach is done only once, during the initial
 * attach, and the device is never detached afterwards because SMMU v2
 * uses a feature called 'retention'.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			if (!sde_smmu->domain_attached &&
					sde_smmu_is_valid_domain_condition(
						mdata, i, true)) {
				rc = iommu_attach_device(
					sde_smmu->rot_domain, sde_smmu->dev);
				if (rc) {
					SDEROT_ERR(
						"iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
							false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			iommu_detach_device(sde_smmu->rot_domain,
					sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}
/*
 * sde_smmu_detach()
 *
 * Only disables the clocks; detaching the iommu-mapped VA range from the
 * device is not required with SMMU, as explained above sde_smmu_attach().
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
					sde_smmu_is_valid_domain_condition(
						mdata, i, false)) {
				iommu_detach_device(sde_smmu->rot_domain,
						sde_smmu->dev);
				SDEROT_DBG("iommu domain[%i] detached\n", i);
				sde_smmu->domain_attached = false;
			} else {
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}
int sde_smmu_get_domain_id(u32 type)
{
	return type;
}
/*
 * sde_smmu_dma_buf_attach()
 *
 * Same as dma_buf_attach() except that the device is taken from
 * the configured SMMU v2 context banks.
 */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, sde_smmu->dev);
}
/*
 * sde_smmu_map_dma_buf()
 *
 * Maps an existing buffer (described by a struct sg_table) into the SMMU
 * context bank device, from which the mapped virtual address and size can
 * be taken. msm_map_dma_buf() is deprecated with SMMU v2; dma_map_sg() is
 * used instead.
 */
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
	unsigned long attrs = 0;

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return -EINVAL;
	}

	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
			attrs);
	if (!rc) {
		SDEROT_ERR("dma map sg failed\n");
		return -ENOMEM;
	}

	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return;
	}

	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}
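
/*
 * Illustrative sketch (not part of the original driver): how a caller
 * might import a dma-buf through the helpers above. The unsecure rotator
 * domain and DMA_TO_DEVICE direction are assumptions for the example;
 * real callers choose these per buffer. The dev argument of
 * sde_smmu_dma_buf_attach() is unused by its body, so NULL is passed.
 * Release mirrors the error path, preceded by sde_smmu_unmap_dma_buf().
 */
static int __maybe_unused sde_smmu_import_example(struct dma_buf *buf,
		dma_addr_t *iova, unsigned long *size)
{
	int domain = SDE_IOMMU_DOMAIN_ROT_UNSECURE;
	struct dma_buf_attachment *attachment;
	struct sg_table *table;
	int rc;

	/* attach the buffer to the context-bank device for this domain */
	attachment = sde_smmu_dma_buf_attach(buf, NULL, domain);
	if (!attachment)
		return -ENODEV;

	/* build the scatterlist for the attachment */
	table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
	if (IS_ERR_OR_NULL(table)) {
		rc = table ? PTR_ERR(table) : -ENOMEM;
		goto err_detach;
	}

	/* map the scatterlist into the rotator SMMU and report the iova */
	rc = sde_smmu_map_dma_buf(buf, table, domain, iova, size,
			DMA_TO_DEVICE);
	if (rc)
		goto err_unmap_attachment;

	return 0;

err_unmap_attachment:
	dma_buf_unmap_attachment(attachment, table, DMA_TO_DEVICE);
err_detach:
	dma_buf_detach(buf, attachment);
	return rc;
}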
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);

int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0 &&
					mdata->iommu_attached) {
				rc = sde_smmu_detach(mdata);
				if (!rc)
					mdata->iommu_attached = false;
			}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
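
/*
 * Illustrative sketch (not part of the original driver): a client brackets
 * rotator hardware access with sde_smmu_ctrl() so the domains stay attached
 * for exactly as long as at least one user holds a reference.
 */
static int __maybe_unused sde_smmu_ctrl_example(void)
{
	int ret;

	/* take a reference; the first user triggers sde_smmu_attach() */
	ret = sde_smmu_ctrl(1);
	if (ret < 0)
		return ret;

	/* ... program and run the rotator here ... */

	/* drop the reference; the last user triggers sde_smmu_detach() */
	sde_smmu_ctrl(0);
	return 0;
}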
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we only come here when secure camera is disabled.
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * keep iommu_attached equal to true, so that the driver
		 * does not attempt to attach while in the secure state
		 */
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);
	return rc;
}
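
/*
 * Illustrative sketch (not part of the original driver): how a
 * secure-camera transition handler might use sde_smmu_secure_ctrl().
 * The entering/leaving call sites are assumptions for the example; in
 * the driver the transition is driven by the sec_cam_en state.
 */
static int __maybe_unused sde_smmu_secure_transition_example(bool entering)
{
	/*
	 * Detach the context banks before entering the secure session and
	 * re-attach them once it ends.
	 */
	return entering ? sde_smmu_secure_ctrl(0) : sde_smmu_secure_ctrl(1);
}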
/*
 * sde_smmu_device_create()
 * @dev: sde_mdp device
 *
 * For SMMU, each context bank is a separate child device of the sde
 * rotator. Platform devices are created here for those SMMU-related child
 * devices, so that their probes can run and handle the SMMU mapping and
 * initialization.
 */
void sde_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC))
			of_platform_device_create(child, NULL, dev);
		else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC))
			of_platform_device_create(child, NULL, dev);
	}
}
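
/*
 * Illustrative devicetree fragment (hypothetical, not taken from actual
 * bindings): the child nodes that sde_smmu_device_create() turns into
 * platform devices, showing the properties sde_smmu_probe() actually
 * reads ("iommus", "gdsc-mdss-supply", "clock-names", "clock-rate").
 * Node names, labels, phandles, and SID values are made up here.
 *
 *	smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 *		compatible = "qcom,smmu_sde_rot_unsec";
 *		iommus = <&apps_smmu 0x1090 0x0>;
 *		gdsc-mdss-supply = <&mdss_core_gdsc>;
 *		clock-names = "rot_ahb_clk";
 *		clock-rate = <0>;
 *	};
 *
 *	smmu_rot_sec: qcom,smmu_rot_sec_cb {
 *		compatible = "qcom,smmu_sde_rot_sec";
 *		iommus = <&apps_smmu 0x1091 0x0>;
 *		gdsc-mdss-supply = <&mdss_core_gdsc>;
 *		clock-names = "rot_ahb_clk";
 *		clock-rate = <0>;
 *	};
 */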
int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);

	return 0;
}
static int sde_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -EINVAL;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
			iova, flags);
	SDEROT_ERR("SMMU device:%s\n", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * return an error (the fault is not handled here) to allow the
	 * smmu driver to dump out useful debug info.
	 */
	return rc;
}
static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};

static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};
MODULE_DEVICE_TABLE(of, sde_smmu_dt_match);
/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each SMMU context acts as a separate device and the context banks are
 * configured with a VA range. Registers the clocks, since each context
 * bank has its own clocks, which must be voted on every time before that
 * context bank is used.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	u32 sid = 0;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initialized, probe defer\n");
		return -EPROBE_DEFER;
	}

	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}

	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
			1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d\n",
					smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	if (of_find_property(pdev->dev.of_node,
		"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
				sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
			mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = PTR_ERR(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	sde_smmu->dev = &pdev->dev;
	sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
	if (!sde_smmu->rot_domain) {
		dev_err(&pdev->dev, "iommu get domain failed\n");
		rc = -EINVAL;
		goto disable_power;
	}

	if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
		int secure_vmid = VMID_CP_PIXEL;

		rc = iommu_domain_set_attr(sde_smmu->rot_domain,
			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			SDEROT_ERR("couldn't set secure pixel vmid\n");
			goto release_mapping;
		}
	}

	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->rot_domain,
			sde_smmu_fault_handler, (void *)sde_smmu);

	sde_smmu_enable_power(sde_smmu, false);

	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
			smmu_domain.domain);
	return 0;

release_mapping:
	sde_smmu->rot_domain = NULL;
disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
			sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}
int sde_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct sde_smmu_client *sde_smmu;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		sde_smmu = sde_smmu_get_cb(i);
		if (!sde_smmu || !sde_smmu->dev ||
			(sde_smmu->dev != &pdev->dev))
			continue;

		sde_smmu->dev = NULL;
		sde_smmu->rot_domain = NULL;
		sde_smmu_enable_power(sde_smmu, false);
		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
				sde_smmu->mp.num_vreg, false);
		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
		sde_smmu->mp.vreg_config = NULL;
		sde_smmu->mp.num_vreg = 0;
	}
	return 0;
}
static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};

static int sde_smmu_register_driver(void)
{
	return platform_driver_register(&sde_smmu_driver);
}

static int __init sde_smmu_driver_init(void)
{
	int ret;

	ret = sde_smmu_register_driver();
	if (ret)
		SDEROT_ERR("sde_smmu_register_driver() failed!\n");

	return ret;
}
module_init(sde_smmu_driver_init);

static void __exit sde_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}
module_exit(sde_smmu_driver_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SDE SMMU driver");