sde_rotator_smmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/of_platform.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"

#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_io_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_debug.h"

#define SMMU_SDE_ROT_SEC	"qcom,smmu_sde_rot_sec"
#define SMMU_SDE_ROT_UNSEC	"qcom,smmu_sde_rot_unsec"

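/*
 * struct sde_smmu_domain - SMMU context bank descriptor
 * @ctx_name: context bank name (e.g. "rot_0")
 * @domain: rotator IOMMU domain index (SDE_IOMMU_DOMAIN_ROT_*)
 */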
struct sde_smmu_domain {
	char *ctx_name;
	int domain;
};

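/*
 * All rotator domain types are currently treated as valid; the hook is
 * kept so a target could restrict domain usage later.
 */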
static inline bool sde_smmu_is_valid_domain_type(
		struct sde_rot_data_type *mdata, int domain_type)
{
	return true;
}

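/*
 * Attach is allowed except while secure camera owns the secure rotator
 * domain; detach is required only in that same secure-camera case.
 */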
static inline bool sde_smmu_is_valid_domain_condition(
		struct sde_rot_data_type *mdata,
		int domain_type,
		bool is_attach)
{
	if (is_attach) {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
			(mdata->sec_cam_en &&
			 domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return false;
		else
			return true;
	} else {
		if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
				mdata->sde_caps_map) &&
			(mdata->sec_cam_en &&
			 domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
			return true;
		else
			return false;
	}
}

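/*
 * sde_smmu_get_cb - return the SMMU client for a rotator domain, or
 * NULL if the domain index is out of range.
 */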
struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	if (!sde_smmu_is_valid_domain_type(mdata, domain))
		return NULL;

	return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
			&mdata->sde_smmu[domain];
}

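/*
 * Parse the optional "clock-names"/"clock-rate" DT properties into the
 * module power struct; a zero rate marks the clock as an AHB clock.
 */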
static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	u32 i = 0;
	int rc = 0;
	const char *clock_name;
	u32 clock_rate = 0;
	int num_clk;

	num_clk = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clk < 0) {
		SDEROT_DBG("clocks are not defined\n");
		num_clk = 0;
	}

	mp->num_clk = num_clk;
	mp->clk_config = devm_kcalloc(&pdev->dev,
			mp->num_clk, sizeof(struct sde_clk), GFP_KERNEL);
	if (num_clk && !mp->clk_config) {
		rc = -ENOMEM;
		mp->num_clk = 0;
		goto clk_err;
	}

	for (i = 0; i < mp->num_clk; i++) {
		of_property_read_string_index(pdev->dev.of_node, "clock-names",
				i, &clock_name);
		strlcpy(mp->clk_config[i].clk_name, clock_name,
				sizeof(mp->clk_config[i].clk_name));

		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
				i, &clock_rate);
		mp->clk_config[i].rate = clock_rate;

		if (!clock_rate)
			mp->clk_config[i].type = SDE_CLK_AHB;
		else
			mp->clk_config[i].type = SDE_CLK_PCLK;
	}

clk_err:
	return rc;
}

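/*
 * Resolve every parsed clock name to a struct clk handle so the
 * context bank clocks can be voted on before register access.
 */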
static int sde_smmu_clk_register(struct platform_device *pdev,
		struct sde_module_power *mp)
{
	int i, ret;
	struct clk *clk;

	ret = sde_smmu_util_parse_dt_clock(pdev, mp);
	if (ret) {
		SDEROT_ERR("unable to parse clocks\n");
		return -EINVAL;
	}

	for (i = 0; i < mp->num_clk; i++) {
		clk = devm_clk_get(&pdev->dev,
				mp->clk_config[i].clk_name);
		if (IS_ERR(clk)) {
			SDEROT_ERR("unable to get clk: %s\n",
					mp->clk_config[i].clk_name);
			return PTR_ERR(clk);
		}
		mp->clk_config[i].clk = clk;
	}
	return 0;
}

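/*
 * Enable order is vreg -> register bus vote -> clocks; disable walks
 * the same resources in reverse.
 */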
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
		bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_76_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
					VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
					false);
			goto end;
		}
	} else {
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}

/*
 * sde_smmu_attach()
 *
 * Associates each configured VA range with the corresponding SMMU context
 * bank device and enables the clocks, since the SMMU requires a vote before
 * use. The IOMMU attach itself happens only once, on the initial attach,
 * and is never undone because SMMU v2 uses a feature called 'retention'.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			if (!sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
						i,
						true)) {
				rc = qcom_iommu_sid_switch(sde_smmu->dev,
						SID_ACQUIRE);
				if (rc) {
					SDEROT_ERR(
						"iommu sid switch failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
							false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			iommu_detach_device(sde_smmu->rot_domain,
					sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}

/*
 * sde_smmu_detach()
 *
 * Only disables the clocks, since the IOMMU-mapped VA range does not need to
 * be detached from the device, as explained in sde_smmu_attach().
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
						i, false)) {
				rc = qcom_iommu_sid_switch(sde_smmu->dev,
						SID_RELEASE);
				if (rc) {
					SDEROT_ERR("iommu sid switch failed (%d)\n",
							rc);
				} else {
					SDEROT_DBG("iommu domain[%i] detached\n",
							i);
					sde_smmu->domain_attached = false;
				}
			} else {
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}

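/*
 * Rotator domain types map one-to-one onto domain ids, so the type is
 * returned unchanged.
 */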
int sde_smmu_get_domain_id(u32 type)
{
	return type;
}

/*
 * sde_smmu_dma_buf_attach()
 *
 * Same as dma_buf_attach() except that the device is taken from
 * the configured SMMU v2 context banks.
 */
struct dma_buf_attachment *sde_smmu_dma_buf_attach(
		struct dma_buf *dma_buf, struct device *dev, int domain)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return NULL;
	}

	return dma_buf_attach(dma_buf, sde_smmu->dev);
}

/*
 * sde_smmu_map_dma_buf()
 *
 * Maps an existing buffer (described by a scatterlist) into the SMMU context
 * bank device, from which the mapped virtual address and size are taken.
 * msm_map_dma_buf is deprecated with SMMU v2; dma_map_sg is used instead.
 */
int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
		struct sg_table *table, int domain, dma_addr_t *iova,
		unsigned long *size, int dir)
{
	int rc;
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
	unsigned long attrs = 0;

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return -EINVAL;
	}

	rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
			attrs);
	if (!rc) {
		SDEROT_ERR("dma map sg failed\n");
		return -ENOMEM;
	}

	*iova = table->sgl->dma_address;
	*size = table->sgl->dma_length;
	return 0;
}

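/*
 * Note: sde_smmu_map_dma_buf() consults only the first sg entry for the
 * IOVA and size, which appears to rely on the DMA-IOMMU layer mapping
 * the whole table into a single contiguous IOVA range for these
 * context banks.
 */
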
void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
		int dir, struct dma_buf *dma_buf)
{
	struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);

	if (!sde_smmu) {
		SDEROT_ERR("not able to get smmu context\n");
		return;
	}

	dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
}

static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);

int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			if (mdata->iommu_ref_cnt == 0 &&
					mdata->iommu_attached) {
				rc = sde_smmu_detach(mdata);
				if (!rc)
					mdata->iommu_attached = false;
			}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}

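/*
 * Typical (hypothetical) caller pairing for sde_smmu_ctrl(): every
 * successful enable must be balanced by a disable once the mapped
 * buffers are no longer in use, e.g.:
 *
 *	rc = sde_smmu_ctrl(1);		// attach on first ref, ref_cnt++
 *	if (rc < 0)
 *		return rc;
 *	// ... map buffers and run the rotation ...
 *	sde_smmu_ctrl(0);		// ref_cnt--, detach at zero refs
 */
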
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach the secure context irrespective of the ref count;
	 * we only get here when secure camera is disabled.
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * Keep iommu_attached true so that the driver does not
		 * attempt to attach while in the secure state.
		 */
	}

	mutex_unlock(&sde_smmu_ref_cnt_lock);
	return rc;
}

/*
 * sde_smmu_device_create()
 * @dev: sde_mdp device
 *
 * For the SMMU, each context bank is a separate child device of the SDE
 * rotator. Platform devices are created here for those SMMU-related child
 * devices, so that their probes can run and handle the SMMU mapping and
 * initialization.
 */
void sde_smmu_device_create(struct device *dev)
{
	struct device_node *parent, *child;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	parent = dev->of_node;
	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_SECURE].domain_attached = true;
		} else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC)) {
			of_platform_device_create(child, NULL, dev);
			mdata->sde_smmu
				[SDE_IOMMU_DOMAIN_ROT_UNSECURE].domain_attached = true;
		}
	}
}

int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);

	return 0;
}

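/*
 * IOMMU fault handler: log the faulting IOVA and trigger a rotator
 * register/debug-bus dump without panicking the system.
 */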
static int sde_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova,
		int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -ENOSYS;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
			iova, flags);
	SDEROT_ERR("SMMU device:%s\n", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * return -ENOSYS to allow the smmu driver to dump out useful
	 * debug info.
	 */
	return rc;
}

static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE};

static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};

/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each SMMU context acts as a separate device, and the context banks are
 * configured with a VA range.
 * Registers the clocks, as each context bank has its own clocks for which a
 * vote has to be cast every time before using that context bank.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	u32 sid = 0;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initialized, probe defer\n");
		return -EPROBE_DEFER;
	}

	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}

	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
				1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d",
					smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
				smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	if (of_find_property(pdev->dev.of_node,
			"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
				sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;

		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
				sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}

	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
				mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = sde_smmu->reg_bus_clt ?
				PTR_ERR(sde_smmu->reg_bus_clt) : -EINVAL;
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	sde_smmu->dev = &pdev->dev;
	sde_smmu->rot_domain = iommu_get_domain_for_dev(sde_smmu->dev);
	if (!sde_smmu->rot_domain) {
		dev_err(&pdev->dev, "iommu get domain failed\n");
		rc = -EINVAL;
		goto disable_power;
	}

	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->rot_domain,
			sde_smmu_fault_handler, (void *)sde_smmu);

	sde_smmu_enable_power(sde_smmu, false);

	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
			smmu_domain.domain);
	return 0;

disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
			sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}

int sde_smmu_remove(struct platform_device *pdev)
{
	int i;
	struct sde_smmu_client *sde_smmu;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		sde_smmu = sde_smmu_get_cb(i);
		if (!sde_smmu || !sde_smmu->dev ||
			(sde_smmu->dev != &pdev->dev))
			continue;

		sde_smmu->dev = NULL;
		sde_smmu->rot_domain = NULL;
		sde_smmu_enable_power(sde_smmu, false);
		sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
				sde_smmu->mp.num_vreg, false);
		devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
		sde_smmu->mp.vreg_config = NULL;
		sde_smmu->mp.num_vreg = 0;
	}
	return 0;
}

static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};

void sde_rotator_smmu_driver_register(void)
{
	platform_driver_register(&sde_smmu_driver);
}

void sde_rotator_smmu_driver_unregister(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
MODULE_IMPORT_NS(DMA_BUF);
#endif