// sde_rotator_smmu.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "%s: " fmt, __func__
  6. #include <linux/clk.h>
  7. #include <linux/debugfs.h>
  8. #include <linux/kernel.h>
  9. #include <linux/module.h>
  10. #include <linux/iommu.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dma-buf.h>
  15. #include <linux/of_platform.h>
  16. #include <linux/msm_dma_iommu_mapping.h>
  17. #include <asm/dma-iommu.h>
  18. #include "soc/qcom/secure_buffer.h"
  19. #include "sde_rotator_base.h"
  20. #include "sde_rotator_util.h"
  21. #include "sde_rotator_io_util.h"
  22. #include "sde_rotator_smmu.h"
  23. #include "sde_rotator_debug.h"
  24. #define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec"
  25. #define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec"
  26. #ifndef SZ_4G
  27. #define SZ_4G (((size_t) SZ_1G) * 4)
  28. #endif
  29. #ifndef SZ_2G
  30. #define SZ_2G (((size_t) SZ_1G) * 2)
  31. #endif
/*
 * struct sde_smmu_domain - describes one SMMU context bank for DT matching
 * @ctx_name: context bank name (e.g. "rot_0", "rot_1")
 * @domain:   SDE_IOMMU_DOMAIN_* index into mdata->sde_smmu[]
 * @start:    start of the IOMMU virtual address range for this bank
 * @size:     size of the IOMMU virtual address range for this bank
 */
struct sde_smmu_domain {
	char *ctx_name;
	int domain;
	unsigned long start;
	unsigned long size;
};
  38. static inline bool sde_smmu_is_valid_domain_type(
  39. struct sde_rot_data_type *mdata, int domain_type)
  40. {
  41. return true;
  42. }
  43. static inline bool sde_smmu_is_valid_domain_condition(
  44. struct sde_rot_data_type *mdata,
  45. int domain_type,
  46. bool is_attach)
  47. {
  48. if (is_attach) {
  49. if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
  50. mdata->sde_caps_map) &&
  51. (mdata->sec_cam_en &&
  52. domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
  53. return false;
  54. else
  55. return true;
  56. } else {
  57. if (test_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU,
  58. mdata->sde_caps_map) &&
  59. (mdata->sec_cam_en &&
  60. domain_type == SDE_IOMMU_DOMAIN_ROT_SECURE))
  61. return true;
  62. else
  63. return false;
  64. }
  65. }
  66. struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
  67. {
  68. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  69. if (!sde_smmu_is_valid_domain_type(mdata, domain))
  70. return NULL;
  71. return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
  72. &mdata->sde_smmu[domain];
  73. }
  74. static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
  75. struct sde_module_power *mp)
  76. {
  77. u32 i = 0, rc = 0;
  78. const char *clock_name;
  79. u32 clock_rate;
  80. int num_clk;
  81. num_clk = of_property_count_strings(pdev->dev.of_node,
  82. "clock-names");
  83. if (num_clk < 0) {
  84. SDEROT_DBG("clocks are not defined\n");
  85. num_clk = 0;
  86. }
  87. mp->num_clk = num_clk;
  88. mp->clk_config = devm_kzalloc(&pdev->dev,
  89. sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
  90. if (num_clk && !mp->clk_config) {
  91. rc = -ENOMEM;
  92. mp->num_clk = 0;
  93. goto clk_err;
  94. }
  95. for (i = 0; i < mp->num_clk; i++) {
  96. of_property_read_string_index(pdev->dev.of_node, "clock-names",
  97. i, &clock_name);
  98. strlcpy(mp->clk_config[i].clk_name, clock_name,
  99. sizeof(mp->clk_config[i].clk_name));
  100. of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
  101. i, &clock_rate);
  102. mp->clk_config[i].rate = clock_rate;
  103. if (!clock_rate)
  104. mp->clk_config[i].type = SDE_CLK_AHB;
  105. else
  106. mp->clk_config[i].type = SDE_CLK_PCLK;
  107. }
  108. clk_err:
  109. return rc;
  110. }
  111. static int sde_smmu_clk_register(struct platform_device *pdev,
  112. struct sde_module_power *mp)
  113. {
  114. int i, ret;
  115. struct clk *clk;
  116. ret = sde_smmu_util_parse_dt_clock(pdev, mp);
  117. if (ret) {
  118. SDEROT_ERR("unable to parse clocks\n");
  119. return -EINVAL;
  120. }
  121. for (i = 0; i < mp->num_clk; i++) {
  122. clk = devm_clk_get(&pdev->dev,
  123. mp->clk_config[i].clk_name);
  124. if (IS_ERR(clk)) {
  125. SDEROT_ERR("unable to get clk: %s\n",
  126. mp->clk_config[i].clk_name);
  127. return PTR_ERR(clk);
  128. }
  129. mp->clk_config[i].clk = clk;
  130. }
  131. return 0;
  132. }
/*
 * sde_smmu_enable_power - enable/disable the resources of one smmu client
 * @sde_smmu: smmu context bank client
 * @enable: true to power up, false to power down
 *
 * Power-up order is: regulators -> reg-bus vote -> clocks; the power-down
 * path runs the same steps in reverse. A clock failure on the enable path
 * rolls back the bus vote and regulators before returning.
 *
 * Returns 0 on success (or when there is nothing to enable), -EINVAL for
 * a NULL client, or the error from the vreg/clk helpers.
 */
static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
	bool enable)
{
	int rc = 0;
	struct sde_module_power *mp;

	if (!sde_smmu)
		return -EINVAL;

	mp = &sde_smmu->mp;

	/* nothing to do when the client owns no regulators or clocks */
	if (!mp->num_vreg && !mp->num_clk)
		return 0;

	if (enable) {
		rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
			goto end;
		}
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
			VOTE_INDEX_19_MHZ);
		rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
		if (rc) {
			SDEROT_ERR("clock enable failed - rc:%d\n", rc);
			/* roll back the bus vote and regulators */
			sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
				VOTE_INDEX_DISABLE);
			sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
				false);
			goto end;
		}
	} else {
		/* disable in reverse order of the enable path */
		sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
		sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
			VOTE_INDEX_DISABLE);
		sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
	}
end:
	return rc;
}
/*
 * sde_smmu_attach()
 * @mdata: rotator driver data
 *
 * Associates each configured VA range with the corresponding smmu context
 * bank device. Enables the clks as smmu requires voting it before the usage.
 * And iommu attach is done only once during the initial attach and it is never
 * detached as smmu v2 uses a feature called 'retention'.
 *
 * On failure, domains processed so far are rolled back (detached and
 * powered down). Returns 0 on success or the first error encountered.
 */
int sde_smmu_attach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i, rc = 0;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			rc = sde_smmu_enable_power(sde_smmu, true);
			if (rc) {
				SDEROT_ERR(
					"power enable failed - domain:[%d] rc:%d\n",
					i, rc);
				goto err;
			}

			/*
			 * Attach only once per domain; skipped while a
			 * secure camera session blocks the secure domain
			 * (see sde_smmu_is_valid_domain_condition).
			 */
			if (!sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
					i,
					true)) {
				rc = __depr_arm_iommu_attach_device(
					sde_smmu->dev, sde_smmu->mmu_mapping);
				if (rc) {
					SDEROT_ERR(
						"iommu attach device failed for domain[%d] with err:%d\n",
						i, rc);
					sde_smmu_enable_power(sde_smmu,
						false);
					goto err;
				}
				sde_smmu->domain_attached = true;
				SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
			}
		} else {
			SDEROT_DBG(
				"iommu device not attached for domain[%d]\n",
				i);
		}
	}
	return 0;

err:
	/*
	 * Unwind domains [0, i): detach and power down each one.
	 * NOTE(review): detach is issued even for domains whose
	 * domain_attached flag was never set - confirm the iommu layer
	 * tolerates a detach without a prior attach.
	 */
	for (i--; i >= 0; i--) {
		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			__depr_arm_iommu_detach_device(sde_smmu->dev);
			sde_smmu_enable_power(sde_smmu, false);
			sde_smmu->domain_attached = false;
		}
	}
	return rc;
}
/*
 * sde_smmu_detach()
 * @mdata: rotator driver data
 *
 * Only disables the clks as it is not required to detach the iommu mapped
 * VA range from the device in smmu as explained in the sde_smmu_attach.
 * A real iommu detach happens only when the secure-camera condition
 * requires it (see sde_smmu_is_valid_domain_condition).
 */
int sde_smmu_detach(struct sde_rot_data_type *mdata)
{
	struct sde_smmu_client *sde_smmu;
	int i;

	for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
		if (!sde_smmu_is_valid_domain_type(mdata, i))
			continue;

		sde_smmu = sde_smmu_get_cb(i);
		if (sde_smmu && sde_smmu->dev) {
			if (sde_smmu->domain_attached &&
				sde_smmu_is_valid_domain_condition(mdata,
					i, false)) {
				/*
				 * NOTE(review): this branch detaches but does
				 * not drop the power vote taken during attach
				 * - confirm this asymmetry is intentional.
				 */
				__depr_arm_iommu_detach_device(sde_smmu->dev);
				SDEROT_DBG("iommu domain[%i] detached\n", i);
				sde_smmu->domain_attached = false;
			}
			else {
				/* common path: just drop clocks/vreg votes */
				sde_smmu_enable_power(sde_smmu, false);
			}
		}
	}
	return 0;
}
  257. int sde_smmu_get_domain_id(u32 type)
  258. {
  259. return type;
  260. }
  261. /*
  262. * sde_smmu_dma_buf_attach()
  263. *
  264. * Same as sde_smmu_dma_buf_attach except that the device is got from
  265. * the configured smmu v2 context banks.
  266. */
  267. struct dma_buf_attachment *sde_smmu_dma_buf_attach(
  268. struct dma_buf *dma_buf, struct device *dev, int domain)
  269. {
  270. struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
  271. if (!sde_smmu) {
  272. SDEROT_ERR("not able to get smmu context\n");
  273. return NULL;
  274. }
  275. return dma_buf_attach(dma_buf, sde_smmu->dev);
  276. }
  277. /*
  278. * sde_smmu_map_dma_buf()
  279. *
  280. * Maps existing buffer (by struct scatterlist) into SMMU context bank device.
  281. * From which we can take the virtual address and size allocated.
  282. * msm_map_dma_buf is depricated with smmu v2 and it uses dma_map_sg instead
  283. */
  284. int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
  285. struct sg_table *table, int domain, dma_addr_t *iova,
  286. unsigned long *size, int dir)
  287. {
  288. int rc;
  289. struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
  290. unsigned long attrs = 0;
  291. if (!sde_smmu) {
  292. SDEROT_ERR("not able to get smmu context\n");
  293. return -EINVAL;
  294. }
  295. rc = dma_map_sg_attrs(sde_smmu->dev, table->sgl, table->nents, dir,
  296. attrs);
  297. if (!rc) {
  298. SDEROT_ERR("dma map sg failed\n");
  299. return -ENOMEM;
  300. }
  301. *iova = table->sgl->dma_address;
  302. *size = table->sgl->dma_length;
  303. return 0;
  304. }
  305. void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
  306. int dir, struct dma_buf *dma_buf)
  307. {
  308. struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
  309. if (!sde_smmu) {
  310. SDEROT_ERR("not able to get smmu context\n");
  311. return;
  312. }
  313. dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir);
  314. }
/* serializes iommu attach/detach against the reference count below */
static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);

/*
 * sde_smmu_ctrl() - reference-counted iommu attach control
 * @enable: non-zero to take a reference (attach happens on 0 -> 1),
 *          zero to drop one (detach happens on 1 -> 0)
 *
 * Returns the updated reference count on success, or a negative error
 * code when the underlying attach/detach failed.
 */
int sde_smmu_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	SDEROT_EVTLOG(__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);
	SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
		__builtin_return_address(0), enable, mdata->iommu_ref_cnt,
		mdata->iommu_attached);

	if (enable) {
		/* first user triggers the actual attach */
		if (!mdata->iommu_attached) {
			rc = sde_smmu_attach(mdata);
			if (!rc)
				mdata->iommu_attached = true;
		}
		mdata->iommu_ref_cnt++;
	} else {
		if (mdata->iommu_ref_cnt) {
			mdata->iommu_ref_cnt--;
			/* last user triggers the actual detach */
			if (mdata->iommu_ref_cnt == 0)
				if (mdata->iommu_attached) {
					rc = sde_smmu_detach(mdata);
					if (!rc)
						mdata->iommu_attached = false;
				}
		} else {
			SDEROT_ERR("unbalanced iommu ref\n");
		}
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	/*
	 * NOTE(review): iommu_ref_cnt is read outside the mutex here, so a
	 * concurrent caller may change it before the return - confirm the
	 * returned count is only used informationally.
	 */
	if (rc < 0)
		return rc;
	else
		return mdata->iommu_ref_cnt;
}
/*
 * sde_smmu_secure_ctrl() - force attach/detach for secure camera transitions
 * @enable: non-zero to attach the iommu, zero to detach it
 *
 * Returns 0 on success or the attach/detach error.
 */
int sde_smmu_secure_ctrl(int enable)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int rc = 0;

	mutex_lock(&sde_smmu_ref_cnt_lock);
	/*
	 * Attach/detach secure context irrespective of ref count,
	 * We come here only when secure camera is disabled
	 */
	if (enable) {
		rc = sde_smmu_attach(mdata);
		if (!rc)
			mdata->iommu_attached = true;
	} else {
		rc = sde_smmu_detach(mdata);
		/*
		 * keep iommu_attached equal to true,
		 * so that driver does not attempt to attach
		 * while in secure state
		 */
	}
	mutex_unlock(&sde_smmu_ref_cnt_lock);

	return rc;
}
  376. /*
  377. * sde_smmu_device_create()
  378. * @dev: sde_mdp device
  379. *
  380. * For smmu, each context bank is a separate child device of sde rot.
  381. * Platform devices are created for those smmu related child devices of
  382. * sde rot here. This would facilitate probes to happen for these devices in
  383. * which the smmu mapping and initialization is handled.
  384. */
  385. void sde_smmu_device_create(struct device *dev)
  386. {
  387. struct device_node *parent, *child;
  388. parent = dev->of_node;
  389. for_each_child_of_node(parent, child) {
  390. if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC))
  391. of_platform_device_create(child, NULL, dev);
  392. else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC))
  393. of_platform_device_create(child, NULL, dev);
  394. }
  395. }
/*
 * sde_smmu_init() - create platform devices for the smmu context banks
 * @dev: sde rotator device whose DT children describe the context banks
 *
 * Always returns 0; device-creation failures surface in the child probes.
 */
int sde_smmu_init(struct device *dev)
{
	sde_smmu_device_create(dev);
	return 0;
}
/*
 * sde_smmu_fault_handler - iommu page-fault callback for the rotator domains
 * @domain: faulting iommu domain
 * @dev: context-bank device
 * @iova: faulting device virtual address
 * @flags: iommu fault flags
 * @token: the sde_smmu_client registered in iommu_set_fault_handler()
 *
 * Logs the fault and triggers a rotator debug dump (no panic), then
 * returns -EINVAL so the smmu driver proceeds with its own fault reporting.
 */
static int sde_smmu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long iova,
	int flags, void *token)
{
	struct sde_smmu_client *sde_smmu;
	int rc = -EINVAL;

	if (!token) {
		SDEROT_ERR("Error: token is NULL\n");
		return -EINVAL;
	}

	sde_smmu = (struct sde_smmu_client *)token;

	/* trigger rotator dump */
	SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
		iova, flags);
	SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);

	/* generate dump, but no panic */
	SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus", "vbif_dbg_bus");

	/*
	 * Return an error so the smmu driver can dump out useful debug
	 * info. NOTE(review): an earlier comment mentioned -ENOSYS, but rc
	 * is -EINVAL here.
	 */
	return rc;
}
/* both context banks map the upper 2GB of VA space: [SZ_2G, SZ_4G) */
static struct sde_smmu_domain sde_rot_unsec = {
	"rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_2G, (SZ_4G - SZ_2G)};
static struct sde_smmu_domain sde_rot_sec = {
	"rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_2G, (SZ_4G - SZ_2G)};

/* DT match table: the compatible string selects the domain descriptor */
static const struct of_device_id sde_smmu_dt_match[] = {
	{ .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
	{ .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
	{}
};
MODULE_DEVICE_TABLE(of, sde_smmu_dt_match);
/*
 * sde_smmu_probe()
 * @pdev: platform device
 *
 * Each smmu context acts as a separate device and the context banks are
 * configured with a VA range.
 * Registers the clks as each context bank has its own clks, for which voting
 * has to be done everytime before using that context bank.
 */
int sde_smmu_probe(struct platform_device *pdev)
{
	struct device *dev;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_smmu_client *sde_smmu;
	int rc = 0;
	struct sde_smmu_domain smmu_domain;
	const struct of_device_id *match;
	struct sde_module_power *mp;
	char name[MAX_CLIENT_NAME_LEN];
	int mdphtw_llc_enable = 1;
	u32 sid = 0;
	bool smmu_rot_full_map;

	if (!mdata) {
		SDEROT_INFO(
			"probe failed as mdata is not initializedi, probe defer\n");
		return -EPROBE_DEFER;
	}

	/* the DT compatible string selects the domain descriptor */
	match = of_match_device(sde_smmu_dt_match, &pdev->dev);
	if (!match || !match->data) {
		SDEROT_ERR("probe failed as match data is invalid\n");
		return -EINVAL;
	}
	smmu_domain = *(struct sde_smmu_domain *) (match->data);
	if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
		SDEROT_ERR("no matching device found\n");
		return -EINVAL;
	}

	/* the context bank must reference an smmu via the "iommus" phandle */
	if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
		dev = &pdev->dev;
		rc = of_property_read_u32_index(pdev->dev.of_node, "iommus",
			1, &sid);
		if (rc)
			SDEROT_DBG("SID not defined for domain:%d",
				smmu_domain.domain);
	} else {
		SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
			smmu_domain.domain);
		return -EINVAL;
	}

	sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
	sde_smmu->domain = smmu_domain.domain;
	sde_smmu->sid = sid;
	mp = &sde_smmu->mp;
	memset(mp, 0, sizeof(struct sde_module_power));

	/* optional gdsc regulator supply */
	if (of_find_property(pdev->dev.of_node,
		"gdsc-mdss-supply", NULL)) {
		mp->vreg_config = devm_kzalloc(&pdev->dev,
			sizeof(struct sde_vreg), GFP_KERNEL);
		if (!mp->vreg_config)
			return -ENOMEM;
		strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
			sizeof(mp->vreg_config->vreg_name));
		mp->num_vreg = 1;
	}
	if (mp->vreg_config) {
		rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
			mp->num_vreg, true);
		if (rc) {
			SDEROT_ERR("vreg config failed rc=%d\n", rc);
			goto release_vreg;
		}
	}

	rc = sde_smmu_clk_register(pdev, mp);
	if (rc) {
		SDEROT_ERR(
			"smmu clk register failed for domain[%d] with err:%d\n",
			smmu_domain.domain, rc);
		goto disable_vreg;
	}

	snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
	sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
	if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
		SDEROT_ERR("mdss bus client register failed\n");
		rc = PTR_ERR(sde_smmu->reg_bus_clt);
		sde_smmu->reg_bus_clt = NULL;
		goto unregister_clk;
	}

	/* power is required while the iommu mapping is created */
	rc = sde_smmu_enable_power(sde_smmu, true);
	if (rc) {
		SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
			smmu_domain.domain, rc);
		goto bus_client_destroy;
	}

	/* optionally widen the VA range to (almost) the full 4GB */
	smmu_rot_full_map = of_property_read_bool(dev->of_node,
		"qcom,fullsize-va-map");
	if (smmu_rot_full_map) {
		smmu_domain.start = SZ_128K;
		smmu_domain.size = SZ_4G - SZ_128K;
	}

	sde_smmu->mmu_mapping = __depr_arm_iommu_create_mapping(
		&platform_bus_type, smmu_domain.start, smmu_domain.size);
	if (IS_ERR(sde_smmu->mmu_mapping)) {
		SDEROT_ERR("iommu create mapping failed for domain[%d]\n",
			smmu_domain.domain);
		rc = PTR_ERR(sde_smmu->mmu_mapping);
		sde_smmu->mmu_mapping = NULL;
		goto disable_power;
	}

	/* let rotator page-table walks use the upstream (LLC) hint */
	rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
		DOMAIN_ATTR_USE_UPSTREAM_HINT, &mdphtw_llc_enable);
	if (rc) {
		SDEROT_ERR("couldn't enable rot pagetable walks: %d\n", rc);
		goto release_mapping;
	}

	if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
		int secure_vmid = VMID_CP_PIXEL;

		rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
		if (rc) {
			SDEROT_ERR("couldn't set secure pixel vmid\n");
			goto release_mapping;
		}
	}

	/*
	 * NOTE(review): the devm_kzalloc result is not checked; a failed
	 * allocation leaves dma_parms NULL - confirm dma_set_max_seg_size
	 * tolerates that.
	 */
	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev,
			sizeof(*dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));

	iommu_set_fault_handler(sde_smmu->mmu_mapping->domain,
		sde_smmu_fault_handler, (void *)sde_smmu);

	/* configuration done; drop the power vote until first use */
	sde_smmu_enable_power(sde_smmu, false);

	sde_smmu->dev = dev;
	SDEROT_INFO(
		"iommu v2 domain[%d] mapping and clk register successful!\n",
		smmu_domain.domain);
	return 0;

/* error unwinding: labels run in reverse order of acquisition */
release_mapping:
	__depr_arm_iommu_release_mapping(sde_smmu->mmu_mapping);
	sde_smmu->mmu_mapping = NULL;
disable_power:
	sde_smmu_enable_power(sde_smmu, false);
bus_client_destroy:
	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
	sde_smmu->reg_bus_clt = NULL;
unregister_clk:
disable_vreg:
	sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
		sde_smmu->mp.num_vreg, false);
release_vreg:
	devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
	sde_smmu->mp.vreg_config = NULL;
	sde_smmu->mp.num_vreg = 0;
	return rc;
}
  588. int sde_smmu_remove(struct platform_device *pdev)
  589. {
  590. int i;
  591. struct sde_smmu_client *sde_smmu;
  592. for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
  593. sde_smmu = sde_smmu_get_cb(i);
  594. if (!sde_smmu || !sde_smmu->dev ||
  595. (sde_smmu->dev != &pdev->dev))
  596. continue;
  597. sde_smmu->dev = NULL;
  598. __depr_arm_iommu_release_mapping(sde_smmu->mmu_mapping);
  599. sde_smmu->mmu_mapping = NULL;
  600. sde_smmu_enable_power(sde_smmu, false);
  601. sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
  602. sde_smmu->reg_bus_clt = NULL;
  603. sde_rot_config_vreg(&pdev->dev, sde_smmu->mp.vreg_config,
  604. sde_smmu->mp.num_vreg, false);
  605. devm_kfree(&pdev->dev, sde_smmu->mp.vreg_config);
  606. sde_smmu->mp.vreg_config = NULL;
  607. sde_smmu->mp.num_vreg = 0;
  608. }
  609. return 0;
  610. }
/* platform driver for the rotator smmu context-bank child devices */
static struct platform_driver sde_smmu_driver = {
	.probe = sde_smmu_probe,
	.remove = sde_smmu_remove,
	.shutdown = NULL,
	.driver = {
		.name = "sde_smmu",
		.of_match_table = sde_smmu_dt_match,
	},
};
/* helper that registers the smmu platform driver */
static int sde_smmu_register_driver(void)
{
	return platform_driver_register(&sde_smmu_driver);
}
  624. static int __init sde_smmu_driver_init(void)
  625. {
  626. int ret;
  627. ret = sde_smmu_register_driver();
  628. if (ret)
  629. SDEROT_ERR("sde_smmu_register_driver() failed!\n");
  630. return ret;
  631. }
  632. module_init(sde_smmu_driver_init);
/* module exit: unregister the smmu platform driver */
static void __exit sde_smmu_driver_cleanup(void)
{
	platform_driver_unregister(&sde_smmu_driver);
}
module_exit(sde_smmu_driver_cleanup);
  638. MODULE_LICENSE("GPL v2");
  639. MODULE_DESCRIPTION("SDE SMMU driver");