clk-spmi-pmic-div.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/types.h>

#define REG_DIV_CTL1			0x43
#define DIV_CTL1_DIV_FACTOR_MASK	GENMASK(2, 0)

#define REG_EN_CTL			0x46
#define REG_EN_MASK			BIT(7)
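
/*
 * Per-divider state: the SPMI register base of this divider and the
 * parent (CXO) clock period in nanoseconds, used to time the delays
 * around enable/disable.
 */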
struct clkdiv {
	struct regmap *regmap;
	u16 base;
	spinlock_t lock;
	struct clk_hw hw;
	unsigned int cxo_period_ns;
};

static inline struct clkdiv *to_clkdiv(struct clk_hw *hw)
{
	return container_of(hw, struct clkdiv, hw);
}
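
/*
 * The 3-bit divide factor selects a power-of-two divider: a factor of
 * n gives divide-by-2^(n - 1), and a factor of 0 is treated the same
 * as 1 (divide-by-1).
 */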
static inline unsigned int div_factor_to_div(unsigned int div_factor)
{
	if (!div_factor)
		div_factor = 1;

	return 1 << (div_factor - 1);
}

static inline unsigned int div_to_div_factor(unsigned int div)
{
	return min(ilog2(div) + 1, 7);
}

static bool is_spmi_pmic_clkdiv_enabled(struct clkdiv *clkdiv)
{
	unsigned int val = 0;

	regmap_read(clkdiv->regmap, clkdiv->base + REG_EN_CTL, &val);

	return val & REG_EN_MASK;
}
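
/*
 * Toggle the enable bit, then busy-wait for three divided-clock cycles
 * (plus two extra CXO cycles when enabling) before returning.
 */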
static int
__spmi_pmic_clkdiv_set_enable_state(struct clkdiv *clkdiv, bool enable,
				    unsigned int div_factor)
{
	int ret;
	unsigned int ns = clkdiv->cxo_period_ns;
	unsigned int div = div_factor_to_div(div_factor);

	ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_EN_CTL,
				 REG_EN_MASK, enable ? REG_EN_MASK : 0);
	if (ret)
		return ret;

	if (enable)
		ndelay((2 + 3 * div) * ns);
	else
		ndelay(3 * div * ns);

	return 0;
}

static int spmi_pmic_clkdiv_set_enable_state(struct clkdiv *clkdiv, bool enable)
{
	unsigned int div_factor;

	regmap_read(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1, &div_factor);
	div_factor &= DIV_CTL1_DIV_FACTOR_MASK;

	return __spmi_pmic_clkdiv_set_enable_state(clkdiv, enable, div_factor);
}

static int clk_spmi_pmic_div_enable(struct clk_hw *hw)
{
	struct clkdiv *clkdiv = to_clkdiv(hw);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&clkdiv->lock, flags);
	ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, true);
	spin_unlock_irqrestore(&clkdiv->lock, flags);

	return ret;
}

static void clk_spmi_pmic_div_disable(struct clk_hw *hw)
{
	struct clkdiv *clkdiv = to_clkdiv(hw);
	unsigned long flags;

	spin_lock_irqsave(&clkdiv->lock, flags);
	spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
	spin_unlock_irqrestore(&clkdiv->lock, flags);
}

static long clk_spmi_pmic_div_round_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long *parent_rate)
{
	unsigned int div, div_factor;

	div = DIV_ROUND_UP(*parent_rate, rate);
	div_factor = div_to_div_factor(div);
	div = div_factor_to_div(div_factor);

	return *parent_rate / div;
}

static unsigned long
clk_spmi_pmic_div_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clkdiv *clkdiv = to_clkdiv(hw);
	unsigned int div_factor;

	regmap_read(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1, &div_factor);
	div_factor &= DIV_CTL1_DIV_FACTOR_MASK;

	return parent_rate / div_factor_to_div(div_factor);
}
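
/*
 * If the divider is currently running, disable it before reprogramming
 * the divide factor and re-enable it afterwards.
 */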
static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
				      unsigned long parent_rate)
{
	struct clkdiv *clkdiv = to_clkdiv(hw);
	unsigned int div_factor = div_to_div_factor(parent_rate / rate);
	unsigned long flags;
	bool enabled;
	int ret;

	spin_lock_irqsave(&clkdiv->lock, flags);

	enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
	if (enabled) {
		ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
		if (ret)
			goto unlock;
	}

	ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
				 DIV_CTL1_DIV_FACTOR_MASK, div_factor);
	if (ret)
		goto unlock;

	if (enabled)
		ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
							  div_factor);

unlock:
	spin_unlock_irqrestore(&clkdiv->lock, flags);

	return ret;
}

static const struct clk_ops clk_spmi_pmic_div_ops = {
	.enable = clk_spmi_pmic_div_enable,
	.disable = clk_spmi_pmic_div_disable,
	.set_rate = clk_spmi_pmic_div_set_rate,
	.recalc_rate = clk_spmi_pmic_div_recalc_rate,
	.round_rate = clk_spmi_pmic_div_round_rate,
};

struct spmi_pmic_div_clk_cc {
	int nclks;
	struct clkdiv clks[];
};
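
/*
 * Clock specifiers in the devicetree use 1-based indices; translate
 * them into 0-based offsets into the clkdiv array.
 */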
static struct clk_hw *
spmi_pmic_div_clk_hw_get(struct of_phandle_args *clkspec, void *data)
{
	struct spmi_pmic_div_clk_cc *cc = data;
	int idx = clkspec->args[0] - 1; /* Start at 1 instead of 0 */

	if (idx < 0 || idx >= cc->nclks) {
		pr_err("%s: index value %u is invalid; allowed range [1, %d]\n",
		       __func__, clkspec->args[0], cc->nclks);
		return ERR_PTR(-EINVAL);
	}

	return &cc->clks[idx].hw;
}

static int spmi_pmic_clkdiv_probe(struct platform_device *pdev)
{
	struct spmi_pmic_div_clk_cc *cc;
	struct clk_init_data init = {};
	struct clkdiv *clkdiv;
	struct clk *cxo;
	struct regmap *regmap;
	struct device *dev = &pdev->dev;
	struct device_node *of_node = dev->of_node;
	bool use_dt_name = false;
	const char *parent_name;
	int nclks, i, ret, cxo_hz;
	char name[20];
	u32 start;

	ret = of_property_read_u32(of_node, "reg", &start);
	if (ret < 0) {
		dev_err(dev, "reg property reading failed\n");
		return ret;
	}

	regmap = dev_get_regmap(dev->parent, NULL);
	if (!regmap) {
		dev_err(dev, "Couldn't get parent's regmap\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(of_node, "qcom,num-clkdivs", &nclks);
	if (ret < 0) {
		dev_err(dev, "qcom,num-clkdivs property reading failed, ret=%d\n",
			ret);
		return ret;
	}

	if (!nclks)
		return -EINVAL;

	cc = devm_kzalloc(dev, struct_size(cc, clks, nclks), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->nclks = nclks;

	cxo = clk_get(dev, "xo");
	if (IS_ERR(cxo)) {
		ret = PTR_ERR(cxo);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get xo clock\n");
		return ret;
	}
	cxo_hz = clk_get_rate(cxo);
	clk_put(cxo);
	if (cxo_hz <= 0) {
		dev_err(dev, "invalid CXO rate: %d\n", cxo_hz);
		return -EINVAL;
	}

	parent_name = of_clk_get_parent_name(of_node, 0);
	if (!parent_name) {
		dev_err(dev, "missing parent clock\n");
		return -ENODEV;
	}

	if (of_find_property(of_node, "clock-output-names", NULL))
		use_dt_name = true;

	init.name = name;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.ops = &clk_spmi_pmic_div_ops;
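
	/*
	 * Dividers are laid out at consecutive 0x100-byte offsets from the
	 * 'reg' base address; each is named from clock-output-names when
	 * provided, or given a generated "div_clkN" name otherwise.
	 */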
	for (i = 0, clkdiv = cc->clks; i < nclks; i++) {
		if (use_dt_name) {
			ret = of_property_read_string_index(of_node,
							    "clock-output-names",
							    i, &init.name);
			if (ret) {
				dev_err(dev, "could not read clock-output-names %d, ret=%d\n",
					i, ret);
				return ret;
			}
		} else {
			snprintf(name, sizeof(name), "div_clk%d", i + 1);
		}

		spin_lock_init(&clkdiv[i].lock);
		clkdiv[i].base = start + i * 0x100;
		clkdiv[i].regmap = regmap;
		clkdiv[i].cxo_period_ns = NSEC_PER_SEC / cxo_hz;
		clkdiv[i].hw.init = &init;

		ret = devm_clk_hw_register(dev, &clkdiv[i].hw);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, spmi_pmic_div_clk_hw_get, cc);
}

static const struct of_device_id spmi_pmic_clkdiv_match_table[] = {
	{ .compatible = "qcom,spmi-clkdiv" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spmi_pmic_clkdiv_match_table);

static struct platform_driver spmi_pmic_clkdiv_driver = {
	.driver = {
		.name = "qcom,spmi-pmic-clkdiv",
		.of_match_table = spmi_pmic_clkdiv_match_table,
	},
	.probe = spmi_pmic_clkdiv_probe,
};
module_platform_driver(spmi_pmic_clkdiv_driver);

MODULE_DESCRIPTION("QCOM SPMI PMIC clkdiv driver");
MODULE_LICENSE("GPL v2");