ccu_nm.c 5.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2016 Maxime Ripard
  4. * Maxime Ripard <[email protected]>
  5. */
  6. #include <linux/clk-provider.h>
  7. #include <linux/io.h>
  8. #include "ccu_frac.h"
  9. #include "ccu_gate.h"
  10. #include "ccu_nm.h"
/*
 * Scratch structure for the N*M factor search: min_*/max_* describe the
 * allowed range for each factor, and ccu_nm_find_best() writes the best
 * pair it found into n and m.
 */
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};
  15. static unsigned long ccu_nm_calc_rate(unsigned long parent,
  16. unsigned long n, unsigned long m)
  17. {
  18. u64 rate = parent;
  19. rate *= n;
  20. do_div(rate, m);
  21. return rate;
  22. }
  23. static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
  24. struct _ccu_nm *nm)
  25. {
  26. unsigned long best_rate = 0;
  27. unsigned long best_n = 0, best_m = 0;
  28. unsigned long _n, _m;
  29. for (_n = nm->min_n; _n <= nm->max_n; _n++) {
  30. for (_m = nm->min_m; _m <= nm->max_m; _m++) {
  31. unsigned long tmp_rate = ccu_nm_calc_rate(parent,
  32. _n, _m);
  33. if (tmp_rate > rate)
  34. continue;
  35. if ((rate - tmp_rate) < (rate - best_rate)) {
  36. best_rate = tmp_rate;
  37. best_n = _n;
  38. best_m = _m;
  39. }
  40. }
  41. }
  42. nm->n = best_n;
  43. nm->m = best_m;
  44. }
  45. static void ccu_nm_disable(struct clk_hw *hw)
  46. {
  47. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  48. return ccu_gate_helper_disable(&nm->common, nm->enable);
  49. }
  50. static int ccu_nm_enable(struct clk_hw *hw)
  51. {
  52. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  53. return ccu_gate_helper_enable(&nm->common, nm->enable);
  54. }
  55. static int ccu_nm_is_enabled(struct clk_hw *hw)
  56. {
  57. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  58. return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
  59. }
/*
 * clk_ops.recalc_rate callback: read the current rate back from hardware.
 *
 * Fractional mode takes precedence: when it is active the rate comes from
 * the fractional helper.  Otherwise N and M are decoded from the factor
 * register; if sigma-delta modulation is active, the SDM helper computes
 * the rate from those factors instead of the plain parent * n / m formula.
 * In every path a fixed post-divider, when present, divides the result.
 */
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

	/* Decode N: shift out, mask to field width, apply register offset */
	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)	/* guard against a zero factor (and later division issues) */
		n++;

	/* Decode M the same way */
	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)	/* m is a divisor; 0 would divide by zero */
		m++;

	/* NOTE(review): SDM helper takes (m, n) in that order — verify */
	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = ccu_nm_calc_rate(parent_rate, n, m);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}
  92. static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
  93. unsigned long *parent_rate)
  94. {
  95. struct ccu_nm *nm = hw_to_ccu_nm(hw);
  96. struct _ccu_nm _nm;
  97. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  98. rate *= nm->fixed_post_div;
  99. if (rate < nm->min_rate) {
  100. rate = nm->min_rate;
  101. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  102. rate /= nm->fixed_post_div;
  103. return rate;
  104. }
  105. if (nm->max_rate && rate > nm->max_rate) {
  106. rate = nm->max_rate;
  107. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  108. rate /= nm->fixed_post_div;
  109. return rate;
  110. }
  111. if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
  112. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  113. rate /= nm->fixed_post_div;
  114. return rate;
  115. }
  116. if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
  117. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  118. rate /= nm->fixed_post_div;
  119. return rate;
  120. }
  121. _nm.min_n = nm->n.min ?: 1;
  122. _nm.max_n = nm->n.max ?: 1 << nm->n.width;
  123. _nm.min_m = 1;
  124. _nm.max_m = nm->m.max ?: 1 << nm->m.width;
  125. ccu_nm_find_best(*parent_rate, rate, &_nm);
  126. rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
  127. if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
  128. rate /= nm->fixed_post_div;
  129. return rate;
  130. }
/*
 * clk_ops.set_rate callback: program the hardware for @rate.
 *
 * Fractional mode is tried first; when it applies, the M field is cleared
 * under the CCU spinlock before the fractional helper takes over, and the
 * helper performs the rest of the programming (including the lock wait).
 * Otherwise integer (or sigma-delta) factors are chosen and written to
 * the factor register under the spinlock, then we wait for the PLL to
 * report lock.  Always returns 0 on the integer path.
 */
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		/* Leaving fractional mode: make sure it is switched off */
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	/* Factor search ranges; a 0 limit means "use the full field width" */
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	/* Read-modify-write: clear both factor fields, then set new values */
	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	/* Register fields hold the factor minus its offset */
	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	/* Wait for the PLL to stabilize before consumers use the clock */
	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}
/* Operations table for N*M-factor clocks, exported for the SoC CCU drivers. */
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, SUNXI_CCU);