sde_rotator_smmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <asm/dma-iommu.h>

#include "soc/qcom/secure_buffer.h"
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"

#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"

struct sde_smmu_domain {
	char *ctx_name;
	int domain;
};
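/*
 * sde_smmu_is_valid_domain_type()
 *
 * Always returns true: all domain types are treated as valid on this
 * target.
 */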
static inline bool sde_smmu_is_valid_domain_type(
		struct sde_rot_data_type *mdata, int domain_type)
{
	return true;
}
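/*
 * sde_smmu_is_valid_domain_condition()
 *
 * Decides whether an attach or detach is allowed for the given domain.
 * When SDE_CAPS_SEC_ATTACH_DETACH_SMMU is set and secure camera is
 * enabled, the rotator secure domain must stay detached: attach is
 * refused and detach is permitted.
 */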
static inline bool sde_smmu_is_valid_domain_condition(
		struct sde_rot_data_type *mdata,
		int domain_type,
		bool is_attach)
{
	if (is_attach) {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return false;
		else
			return true;
	} else {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
				(mdata->sec_cam_en &&
				domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return true;
		else
			return false;
	}
}
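/*
 * sde_smmu_get_cb()
 *
 * Returns the SMMU client (context bank) for the given domain, or NULL
 * if the domain index is out of range or invalid.
 */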
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!sde_smmu_is_valid_domain_type(mdata, domain))
		return NULL;

	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
			&mdata->sde_smmu[domain];
}
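/*
 * sde_smmu_util_parse_dt_clock()
 *
 * Parses the "clock-names" and "clock-rate" device tree properties into
 * mp->clk_config. A clock with rate 0 is typed SDE_CLK_AHB, all others
 * SDE_CLK_PCLK.
 */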
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	u32 i = 0;
	int rc = 0;
	const char *clock_name;
	u32 clock_rate;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk < 0) {
		SDEROT_DBG("clocks are not defined\n");
		num_clk = 0;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kzalloc(&pdev->dev,
			sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
	if (num_clk && !mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		if (!clock_rate)
			mp->clk_config[i].type = SDE_CLK_AHB;
		else
			mp->clk_config[i].type = SDE_CLK_PCLK;
	}

clk_err:
	return rc;
}
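/*
 * sde_smmu_clk_register()
 *
 * Parses the context bank clocks from the device tree and acquires a
 * clk handle for each of them.
 */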
static int sde_smmu_clk_register(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		SDEROT_ERR("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			SDEROT_ERR("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}
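/*
 * sde_smmu_enable_power()
 *
 * Enables or disables the regulators, register bus vote and clocks for
 * the given SMMU context bank: in that order on enable, in reverse on
 * disable. A clock failure on the enable path rolls back the bus vote
 * and regulators.
 */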
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
		bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_76_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
					VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
					false);
			goto end;
		}
	} else {
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}
/*
 * sde_smmu_attach()
 *
 * Associates each configured VA range with the corresponding SMMU context
 * bank device. Enables the clocks, as the SMMU requires them to be voted
 * before use. The IOMMU attach is done only once, on the initial attach,
 * and the domain is never detached because SMMU v2 uses a feature called
 * 'retention'.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			if (!sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
						i,
						true)) {
				rc = iommu_attach_device(
					sde_smmu->rot_domain, sde_smmu->dev);
				if (rc) {
					SDEROT_ERR(
						"iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
						false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			iommu_detach_device(sde_smmu->rot_domain,
					sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}
/*
 * sde_smmu_detach()
 *
 * Only disables the clocks; detaching the IOMMU-mapped VA range from the
 * device is not required, as explained in sde_smmu_attach().
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
					i, false)) {
				iommu_detach_device(sde_smmu->rot_domain,
						sde_smmu->dev);
				SDEROT_DBG("iommu domain[%i] detached\n", i);
				sde_smmu->domain_attached = false;
			} else {
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}
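/*
 * sde_smmu_get_domain_id()
 *
 * The rotator domain ids map 1:1 onto the iommu domain types, so the
 * type is returned as-is.
 */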
int sde_smmu_get_domain_id(u32 type)
{
	return type;
}
/*
 * sde_smmu_dma_buf_attach()
 *
 * Same as dma_buf_attach() except that the device is taken from the
 * configured SMMU v2 context banks.
 */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, sde_smmu->dev);
}
/*
 * sde_smmu_map_dma_buf()
 *
 * Maps an existing buffer (described by a struct sg_table) into the SMMU
 * context bank device, from which the allocated virtual address and size
 * can be taken. msm_map_dma_buf() is deprecated with SMMU v2;
 * dma_map_sg_attrs() is used instead.
 */
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
	unsigned long attrs = 0;

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return -EINVAL;
	}

	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
			attrs);
	if (!rc) {
		SDEROT_ERR("dma map sg failed\n");
		return -ENOMEM;
	}

	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}
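/*
 * sde_smmu_unmap_dma_buf()
 *
 * Unmaps a scatterlist previously mapped with sde_smmu_map_dma_buf()
 * from the context bank device of the given domain.
 */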
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return;
	}

	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}

static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);
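/*
 * sde_smmu_ctrl()
 *
 * Reference-counted attach/detach control. The first enable attaches the
 * iommu domains; each disable drops the reference count, and the domains
 * are detached when the count reaches zero. Returns the updated reference
 * count on success, or a negative error code on failure.
 */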
int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0 &&
					mdata->iommu_attached) {
				rc = sde_smmu_detach(mdata);
				if (!rc)
					mdata->iommu_attached = false;
			}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
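/*
 * sde_smmu_secure_ctrl()
 *
 * Attaches or detaches the iommu domains irrespective of the reference
 * count; per the inline note below, this path is only taken when secure
 * camera is disabled.
 */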
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we come here only when secure camera is disabled.
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * Keep iommu_attached set to true so that the driver does
		 * not attempt to attach while in the secure state.
		 */
	}

	mutex_unlock(&sde_smmu_ref_cnt_lock);
	return rc;
}
/*
 * sde_smmu_device_create()
 * @dev: sde_mdp device
 *
 * For SMMU, each context bank is a separate child device of the sde
 * rotator. Platform devices are created here for those SMMU-related
 * child devices, so that their probes can run and handle the SMMU
 * mapping and initialization.
 */
void sde_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_SECURE].domain_attached = true;
		} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain_attached = true;
		}
	}
}
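/*
 * sde_smmu_init()
 *
 * Creates the platform devices for the SMMU context banks so that their
 * probes can run.
 */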
int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);

	return 0;
}
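/*
 * sde_smmu_fault_handler()
 *
 * Called by the iommu layer on a rotator SMMU fault. Logs the faulting
 * iova and flags, triggers a rotator debug dump (without panicking) and
 * returns an error so the smmu driver also reports the fault.
 */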
static int sde_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -EINVAL;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
			iova, flags);
	SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * Return an error so that the smmu driver also dumps out its own
	 * useful debug info.
	 */
	return rc;
}
static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};

static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};
MODULE_DEVICE_TABLE(of, sde_smmu_dt_match);
/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each SMMU context acts as a separate device, and the context banks are
 * configured with a VA range.
 * Registers the clocks, as each context bank has its own clocks for which
 * a vote has to be made every time before that context bank is used.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	u32 sid = 0;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initialized, probe defer\n");
		return -EPROBE_DEFER;
	}

	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}

	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
			1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d",
					smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	if (of_find_property(pdev->dev.of_node,
			"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
				sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
				mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = PTR_ERR(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	sde_smmu->dev = &pdev->dev;
	sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
	if (!sde_smmu->rot_domain) {
		dev_err(&pdev->dev, "iommu get domain failed\n");
		rc = -EINVAL;
		goto disable_power;
	}

	if (!dev->dma_parms) {
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms) {
			rc = -ENOMEM;
			goto disable_power;
		}
	}

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->rot_domain,
			sde_smmu_fault_handler, (void *)sde_smmu);

	sde_smmu_enable_power(sde_smmu, false);

	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
			smmu_domain.domain);
	return 0;

disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
			sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}
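/*
 * sde_smmu_remove()
 *
 * Tears down each context bank bound to the removed platform device:
 * drops power, destroys the register bus client and releases the
 * regulator configuration.
 */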
int sde_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct sde_smmu_client *sde_smmu;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		sde_smmu = sde_smmu_get_cb(i);
		if (!sde_smmu || !sde_smmu->dev ||
			(sde_smmu->dev != &pdev->dev))
			continue;

		sde_smmu->dev = NULL;
		sde_smmu->rot_domain = NULL;
		sde_smmu_enable_power(sde_smmu, false);
		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
				sde_smmu->mp.num_vreg, false);
		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
		sde_smmu->mp.vreg_config = NULL;
		sde_smmu->mp.num_vreg = 0;
	}
	return 0;
}
static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};

static int sde_smmu_register_driver(void)
{
	return platform_driver_register(&sde_smmu_driver);
}

static int __init sde_smmu_driver_init(void)
{
	int ret;

	ret = sde_smmu_register_driver();
	if (ret)
		SDEROT_ERR("sde_smmu_register_driver() failed!\n");

	return ret;
}
module_init(sde_smmu_driver_init);

static void __exit sde_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}
module_exit(sde_smmu_driver_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SDE SMMU driver");