// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>

#include "soc/qcom/secure_buffer.h"

#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"

#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"

struct sde_smmu_domain {
	char *ctx_name;
	int domain;
};
static inline bool sde_smmu_is_valid_domain_type(
		struct sde_rot_data_type *mdata, int domain_type)
{
	return true;
}

static inline bool sde_smmu_is_valid_domain_condition(
		struct sde_rot_data_type *mdata,
		int domain_type,
		bool is_attach)
{
	if (is_attach) {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return false;
		else
			return true;
	} else {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return true;
		else
			return false;
	}
}
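
/*
 * Note (added commentary): on targets with SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
 * the secure rotator domain must not stay attached while secure camera is
 * active (sec_cam_en). The helper above therefore refuses attach and
 * requests detach for that one combination; every other domain/state
 * combination keeps the default behavior (attach allowed, detach skipped).
 */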
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!sde_smmu_is_valid_domain_type(mdata, domain))
		return NULL;

	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
			&mdata->sde_smmu[domain];
}
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	u32 i = 0;
	int rc = 0;
	const char *clock_name;
	u32 clock_rate;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk < 0) {
		SDEROT_DBG("clocks are not defined\n");
		num_clk = 0;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kzalloc(&pdev->dev,
			sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
	if (num_clk && !mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		/* a zero clock-rate entry denotes a bus/AHB clock */
		if (!clock_rate)
			mp->clk_config[i].type = SDE_CLK_AHB;
		else
			mp->clk_config[i].type = SDE_CLK_PCLK;
	}

clk_err:
	return rc;
}
static int sde_smmu_clk_register(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		SDEROT_ERR("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			SDEROT_ERR("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
		bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_76_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
					VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
					false);
			goto end;
		}
	} else {
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}
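
/*
 * Note (added commentary): the enable path above brings resources up in the
 * order vreg -> register bus vote -> clocks, and the disable path tears them
 * down in the exact reverse order; a failure partway through enable unwinds
 * whatever was already brought up before returning the error.
 */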
/*
 * sde_smmu_attach()
 *
 * Associates each configured VA range with the corresponding smmu context
 * bank device, and enables the clocks since the smmu requires a vote before
 * use. The iommu attach is done only once, during the initial attach, and is
 * never undone because smmu v2 uses a feature called 'retention'.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			if (!sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
						i, true)) {
				rc = iommu_attach_device(
					sde_smmu->rot_domain, sde_smmu->dev);
				if (rc) {
					SDEROT_ERR(
						"iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
						false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			iommu_detach_device(sde_smmu->rot_domain,
					sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}
/*
 * sde_smmu_detach()
 *
 * Only disables the clocks; detaching the iommu-mapped VA range from the
 * device is not required, as explained in sde_smmu_attach.
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
						i, false)) {
				iommu_detach_device(sde_smmu->rot_domain,
						sde_smmu->dev);
				SDEROT_DBG("iommu domain[%i] detached\n", i);
				sde_smmu->domain_attached = false;
			} else {
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}
int sde_smmu_get_domain_id(u32 type)
{
	return type;
}
/*
 * sde_smmu_dma_buf_attach()
 *
 * Same as dma_buf_attach except that the device is taken from
 * the configured smmu v2 context banks.
 */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, sde_smmu->dev);
}
/*
 * sde_smmu_map_dma_buf()
 *
 * Maps an existing buffer (described by a struct sg_table) into the SMMU
 * context bank device, from which the assigned virtual address and size
 * can be read back. msm_map_dma_buf is deprecated with smmu v2, so
 * dma_map_sg is used instead.
 */
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
	unsigned long attrs = 0;

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return -EINVAL;
	}

	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
			attrs);
	if (!rc) {
		SDEROT_ERR("dma map sg failed\n");
		return -ENOMEM;
	}

	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return;
	}

	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}
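
/*
 * Example (illustrative sketch only, not part of the driver): a caller
 * importing a dma-buf through these helpers would typically pair attach,
 * map and unmap as below. 'buf' and the error labels are hypothetical.
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *table;
 *	dma_addr_t iova;
 *	unsigned long size;
 *
 *	attach = sde_smmu_dma_buf_attach(buf, dev,
 *			SDE_IOMMU_DOMAIN_ROT_UNSECURE);
 *	if (!attach)
 *		return -ENOMEM;
 *
 *	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(table))
 *		goto err_detach;
 *
 *	if (sde_smmu_map_dma_buf(buf, table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
 *			&iova, &size, DMA_BIDIRECTIONAL))
 *		goto err_unmap_attachment;
 *
 *	// ... program the rotator with iova/size ...
 *
 *	sde_smmu_unmap_dma_buf(table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
 *			DMA_BIDIRECTIONAL, buf);
 */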
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);

int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0 &&
					mdata->iommu_attached) {
				rc = sde_smmu_detach(mdata);
				if (!rc)
					mdata->iommu_attached = false;
			}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
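
/*
 * Example (illustrative sketch only): sde_smmu_ctrl() is reference counted,
 * so every enable must be balanced by a disable. A hypothetical caller doing
 * rotator work would bracket it like this:
 *
 *	rc = sde_smmu_ctrl(1);		// attaches on the first user
 *	if (rc < 0)
 *		return rc;
 *	// ... issue rotator operations ...
 *	sde_smmu_ctrl(0);		// detaches when the last user drops
 */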
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we get here only when secure camera is disabled.
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * Keep iommu_attached true so that the driver does not
		 * attempt to attach while in the secure state.
		 */
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);
	return rc;
}
/*
 * sde_smmu_device_create()
 * @dev: sde_mdp device
 *
 * For smmu, each context bank is a separate child device of sde rot.
 * Platform devices are created here for those smmu-related child devices,
 * so that their probes can run and handle the smmu mapping and
 * initialization.
 */
void sde_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_SECURE].domain_attached = true;
		} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain_attached = true;
		}
	}
}
int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);

	return 0;
}
static int sde_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -EINVAL;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
			iova, flags);
	SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * Return an error so the smmu driver can also dump out useful
	 * debug info.
	 */
	return rc;
}
static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};

static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};
/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks, as each context bank has its own clks, for which
 * voting has to be done every time before using that context bank.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	u32 sid = 0;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initialized, probe defer\n");
		return -EPROBE_DEFER;
	}

	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}

	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
			1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d",
					smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	if (of_find_property(pdev->dev.of_node,
			"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
				sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
				mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = PTR_ERR(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	sde_smmu->dev = &pdev->dev;
	sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
	if (!sde_smmu->rot_domain) {
		dev_err(&pdev->dev, "iommu get domain failed\n");
		rc = -EINVAL;
		goto disable_power;
	}

	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->rot_domain,
			sde_smmu_fault_handler, (void *)sde_smmu);

	sde_smmu_enable_power(sde_smmu, false);

	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
			smmu_domain.domain);
	return 0;

disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
			sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}
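
/*
 * Example (hypothetical devicetree fragment, for illustration only): the
 * rotator node is expected to carry one child node per context bank, each
 * matching one of the compatibles above, with an "iommus" entry whose
 * second cell is the SID read back in probe. Node names, labels and SID
 * values below are made up.
 *
 *	smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
 *		compatible = "qcom,smmu_sde_rot_unsec";
 *		iommus = <&apps_smmu 0x1090 0x0>;
 *	};
 *
 *	smmu_rot_sec: qcom,smmu_rot_sec_cb {
 *		compatible = "qcom,smmu_sde_rot_sec";
 *		iommus = <&apps_smmu 0x1091 0x0>;
 *	};
 */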
int sde_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct sde_smmu_client *sde_smmu;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		sde_smmu = sde_smmu_get_cb(i);
		if (!sde_smmu || !sde_smmu->dev ||
			(sde_smmu->dev != &pdev->dev))
			continue;

		sde_smmu->dev = NULL;
		sde_smmu->rot_domain = NULL;
		sde_smmu_enable_power(sde_smmu, false);
		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
				sde_smmu->mp.num_vreg, false);
		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
		sde_smmu->mp.vreg_config = NULL;
		sde_smmu->mp.num_vreg = 0;
	}
	return 0;
}
static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};

void sde_rotator_smmu_driver_register(void)
{
	platform_driver_register(&sde_smmu_driver);
}

void sde_rotator_smmu_driver_unregister(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}