arm-smmu-qcom-pm.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/clk.h>
  6. #include <linux/regulator/consumer.h>
  7. #include <linux/interconnect.h>
  8. #include <linux/of_platform.h>
  9. #include <linux/iopoll.h>
  10. #include "arm-smmu.h"
  11. #define ARM_SMMU_ICC_AVG_BW 0
  12. #define ARM_SMMU_ICC_PEAK_BW_HIGH 1000
  13. #define ARM_SMMU_ICC_PEAK_BW_LOW 0
  14. #define ARM_SMMU_ICC_ACTIVE_ONLY_TAG 0x3
  15. /*
  16. * Theoretically, our interconnect does not guarantee the order between
  17. * writes to different "register blocks" even with device memory type.
  18. * It does guarantee that the completion of a read to a particular
  19. * register block implies that previously issued writes to that
  20. * register block have completed, with device memory type.
  21. *
  22. * In particular, we need to ensure that writes to iommu registers
  23. * complete before we turn off the power.
  24. */
/*
 * Flush posted register writes before the SMMU is powered down.
 * See the block comment above: a read completing on a register block
 * guarantees prior writes to that block have completed.
 */
static void arm_smmu_arch_write_sync(struct arm_smmu_device *smmu)
{
u32 id;
if (!smmu)
return;
/* Read to complete prior write transactions; value itself is unused */
id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
/* Wait for read to complete before off */
rmb();
}
  35. static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
  36. {
  37. int i, ret = 0;
  38. for (i = 0; i < pwr->num_clocks; ++i) {
  39. ret = clk_prepare(pwr->clocks[i]);
  40. if (ret) {
  41. dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
  42. while (i--)
  43. clk_unprepare(pwr->clocks[i]);
  44. break;
  45. }
  46. }
  47. return ret;
  48. }
  49. static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
  50. {
  51. int i;
  52. for (i = pwr->num_clocks; i; --i)
  53. clk_unprepare(pwr->clocks[i - 1]);
  54. }
  55. static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
  56. {
  57. int i, ret = 0;
  58. for (i = 0; i < pwr->num_clocks; ++i) {
  59. ret = clk_enable(pwr->clocks[i]);
  60. if (ret) {
  61. dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
  62. while (i--)
  63. clk_disable(pwr->clocks[i]);
  64. break;
  65. }
  66. }
  67. return ret;
  68. }
  69. static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
  70. {
  71. int i;
  72. for (i = pwr->num_clocks; i; --i)
  73. clk_disable(pwr->clocks[i - 1]);
  74. }
  75. static int arm_smmu_raise_interconnect_bw(struct arm_smmu_power_resources *pwr)
  76. {
  77. if (!pwr->icc_path)
  78. return 0;
  79. return icc_set_bw(pwr->icc_path, ARM_SMMU_ICC_AVG_BW,
  80. ARM_SMMU_ICC_PEAK_BW_HIGH);
  81. }
  82. static void arm_smmu_lower_interconnect_bw(struct arm_smmu_power_resources *pwr)
  83. {
  84. if (!pwr->icc_path)
  85. return;
  86. WARN_ON(icc_set_bw(pwr->icc_path, ARM_SMMU_ICC_AVG_BW,
  87. ARM_SMMU_ICC_PEAK_BW_LOW));
  88. }
  89. static int arm_smmu_enable_regulators(struct arm_smmu_power_resources *pwr)
  90. {
  91. struct regulator_bulk_data *consumers;
  92. int num_consumers, ret;
  93. int i;
  94. num_consumers = pwr->num_gdscs;
  95. consumers = pwr->gdscs;
  96. for (i = 0; i < num_consumers; i++) {
  97. ret = regulator_enable(consumers[i].consumer);
  98. if (ret)
  99. goto out;
  100. }
  101. return 0;
  102. out:
  103. i -= 1;
  104. for (; i >= 0; i--)
  105. regulator_disable(consumers[i].consumer);
  106. return ret;
  107. }
/*
 * Power on the SMMU's resources, reference-counted under power_lock.
 *
 * Bring-up order: interconnect BW vote -> regulators -> clk_prepare ->
 * clk_enable -> optional pwr->resume() callback.  On any failure the
 * already-acquired resources are released in reverse order.
 *
 * Returns 0 on success or a negative errno; on success the power count
 * is incremented and a matching arm_smmu_power_off() is required.
 */
int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
{
int ret;
mutex_lock(&pwr->power_lock);
/* Already powered: just take another reference */
if (pwr->power_count > 0) {
pwr->power_count += 1;
mutex_unlock(&pwr->power_lock);
return 0;
}
ret = arm_smmu_raise_interconnect_bw(pwr);
if (ret)
goto out_unlock;
ret = arm_smmu_enable_regulators(pwr);
if (ret)
goto out_disable_bus;
ret = arm_smmu_prepare_clocks(pwr);
if (ret)
goto out_disable_regulators;
ret = arm_smmu_enable_clocks(pwr);
if (ret)
goto out_unprepare_clocks;
/* Optional implementation-specific resume hook, runs fully powered */
if (pwr->resume) {
ret = pwr->resume(pwr);
if (ret)
goto out_disable_clocks;
}
pwr->power_count = 1;
mutex_unlock(&pwr->power_lock);
return 0;
out_disable_clocks:
arm_smmu_disable_clocks(pwr);
out_unprepare_clocks:
arm_smmu_unprepare_clocks(pwr);
out_disable_regulators:
regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
out_disable_bus:
arm_smmu_lower_interconnect_bw(pwr);
out_unlock:
mutex_unlock(&pwr->power_lock);
return ret;
}
  149. /*
  150. * Needing to pass smmu to this api for arm_smmu_arch_write_sync is awkward.
  151. */
  152. void arm_smmu_power_off(struct arm_smmu_device *smmu,
  153. struct arm_smmu_power_resources *pwr)
  154. {
  155. mutex_lock(&pwr->power_lock);
  156. if (pwr->power_count == 0) {
  157. WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
  158. mutex_unlock(&pwr->power_lock);
  159. return;
  160. } else if (pwr->power_count > 1) {
  161. pwr->power_count--;
  162. mutex_unlock(&pwr->power_lock);
  163. return;
  164. }
  165. if (pwr->suspend)
  166. pwr->suspend(pwr);
  167. arm_smmu_arch_write_sync(smmu);
  168. arm_smmu_disable_clocks(pwr);
  169. arm_smmu_unprepare_clocks(pwr);
  170. regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
  171. arm_smmu_lower_interconnect_bw(pwr);
  172. pwr->power_count = 0;
  173. mutex_unlock(&pwr->power_lock);
  174. }
  175. static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
  176. {
  177. const char *cname;
  178. struct property *prop;
  179. int i;
  180. struct device *dev = pwr->dev;
  181. pwr->num_clocks =
  182. of_property_count_strings(dev->of_node, "clock-names");
  183. if (pwr->num_clocks < 1) {
  184. pwr->num_clocks = 0;
  185. return 0;
  186. }
  187. pwr->clocks = devm_kzalloc(
  188. dev, sizeof(*pwr->clocks) * pwr->num_clocks,
  189. GFP_KERNEL);
  190. if (!pwr->clocks)
  191. return -ENOMEM;
  192. i = 0;
  193. of_property_for_each_string(dev->of_node, "clock-names",
  194. prop, cname) {
  195. struct clk *c = devm_clk_get(dev, cname);
  196. if (IS_ERR(c)) {
  197. dev_err(dev, "Couldn't get clock: %s\n",
  198. cname);
  199. return PTR_ERR(c);
  200. }
  201. if (clk_get_rate(c) == 0) {
  202. long rate = clk_round_rate(c, 1000);
  203. clk_set_rate(c, rate);
  204. }
  205. pwr->clocks[i] = c;
  206. ++i;
  207. }
  208. return 0;
  209. }
  210. static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
  211. {
  212. const char *cname;
  213. struct property *prop;
  214. int i;
  215. struct device *dev = pwr->dev;
  216. pwr->num_gdscs =
  217. of_property_count_strings(dev->of_node, "qcom,regulator-names");
  218. if (pwr->num_gdscs < 1) {
  219. pwr->num_gdscs = 0;
  220. return 0;
  221. }
  222. pwr->gdscs = devm_kzalloc(
  223. dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
  224. if (!pwr->gdscs)
  225. return -ENOMEM;
  226. i = 0;
  227. of_property_for_each_string(dev->of_node, "qcom,regulator-names",
  228. prop, cname)
  229. pwr->gdscs[i++].supply = cname;
  230. return devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
  231. }
  232. static int arm_smmu_init_interconnect(struct arm_smmu_power_resources *pwr)
  233. {
  234. struct device *dev = pwr->dev;
  235. /* We don't want the interconnect APIs to print an error message */
  236. if (!of_find_property(dev->of_node, "interconnects", NULL)) {
  237. dev_dbg(dev, "No interconnect info\n");
  238. return 0;
  239. }
  240. pwr->icc_path = devm_of_icc_get(dev, NULL);
  241. if (IS_ERR_OR_NULL(pwr->icc_path)) {
  242. if (PTR_ERR(pwr->icc_path) != -EPROBE_DEFER)
  243. dev_err(dev, "Unable to read interconnect path from devicetree rc: %ld\n",
  244. PTR_ERR(pwr->icc_path));
  245. return pwr->icc_path ? PTR_ERR(pwr->icc_path) : -EINVAL;
  246. }
  247. if (of_property_read_bool(dev->of_node, "qcom,active-only"))
  248. icc_set_tag(pwr->icc_path, ARM_SMMU_ICC_ACTIVE_ONLY_TAG);
  249. return 0;
  250. }
  251. /*
  252. * Cleanup done by devm. Any non-devm resources must clean up themselves.
  253. */
  254. struct arm_smmu_power_resources *arm_smmu_init_power_resources(
  255. struct device *dev)
  256. {
  257. struct arm_smmu_power_resources *pwr;
  258. int ret;
  259. pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
  260. if (!pwr)
  261. return ERR_PTR(-ENOMEM);
  262. pwr->dev = dev;
  263. mutex_init(&pwr->power_lock);
  264. ret = arm_smmu_init_clocks(pwr);
  265. if (ret)
  266. return ERR_PTR(ret);
  267. ret = arm_smmu_init_regulators(pwr);
  268. if (ret)
  269. return ERR_PTR(ret);
  270. ret = arm_smmu_init_interconnect(pwr);
  271. if (ret)
  272. return ERR_PTR(ret);
  273. return pwr;
  274. }