  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2016 Maxime Ripard
  4. * Maxime Ripard <[email protected]>
  5. */
  6. #include <linux/clk-provider.h>
  7. #include <linux/io.h>
  8. #include "ccu_gate.h"
  9. #include "ccu_nkmp.h"
/*
 * Scratch NKMP factor set used while computing a rate.
 *
 * For each factor, min_*/max_* bound the search range handed to
 * ccu_nkmp_find_best(), and n/k/m/p receive the best match it found.
 */
struct _ccu_nkmp {
	unsigned long	n, min_n, max_n;
	unsigned long	k, min_k, max_k;
	unsigned long	m, min_m, max_m;
	unsigned long	p, min_p, max_p;
};
  16. static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
  17. unsigned long n, unsigned long k,
  18. unsigned long m, unsigned long p)
  19. {
  20. u64 rate = parent;
  21. rate *= n * k;
  22. do_div(rate, m * p);
  23. return rate;
  24. }
  25. static void ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
  26. struct _ccu_nkmp *nkmp)
  27. {
  28. unsigned long best_rate = 0;
  29. unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
  30. unsigned long _n, _k, _m, _p;
  31. for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
  32. for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
  33. for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
  34. for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
  35. unsigned long tmp_rate;
  36. tmp_rate = ccu_nkmp_calc_rate(parent,
  37. _n, _k,
  38. _m, _p);
  39. if (tmp_rate > rate)
  40. continue;
  41. if ((rate - tmp_rate) < (rate - best_rate)) {
  42. best_rate = tmp_rate;
  43. best_n = _n;
  44. best_k = _k;
  45. best_m = _m;
  46. best_p = _p;
  47. }
  48. }
  49. }
  50. }
  51. }
  52. nkmp->n = best_n;
  53. nkmp->k = best_k;
  54. nkmp->m = best_m;
  55. nkmp->p = best_p;
  56. }
  57. static void ccu_nkmp_disable(struct clk_hw *hw)
  58. {
  59. struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
  60. return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
  61. }
  62. static int ccu_nkmp_enable(struct clk_hw *hw)
  63. {
  64. struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
  65. return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
  66. }
  67. static int ccu_nkmp_is_enabled(struct clk_hw *hw)
  68. {
  69. struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
  70. return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
  71. }
  72. static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
  73. unsigned long parent_rate)
  74. {
  75. struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
  76. unsigned long n, m, k, p, rate;
  77. u32 reg;
  78. reg = readl(nkmp->common.base + nkmp->common.reg);
  79. n = reg >> nkmp->n.shift;
  80. n &= (1 << nkmp->n.width) - 1;
  81. n += nkmp->n.offset;
  82. if (!n)
  83. n++;
  84. k = reg >> nkmp->k.shift;
  85. k &= (1 << nkmp->k.width) - 1;
  86. k += nkmp->k.offset;
  87. if (!k)
  88. k++;
  89. m = reg >> nkmp->m.shift;
  90. m &= (1 << nkmp->m.width) - 1;
  91. m += nkmp->m.offset;
  92. if (!m)
  93. m++;
  94. p = reg >> nkmp->p.shift;
  95. p &= (1 << nkmp->p.width) - 1;
  96. rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
  97. if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
  98. rate /= nkmp->fixed_post_div;
  99. return rate;
  100. }
  101. static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
  102. unsigned long *parent_rate)
  103. {
  104. struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
  105. struct _ccu_nkmp _nkmp;
  106. if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
  107. rate *= nkmp->fixed_post_div;
  108. if (nkmp->max_rate && rate > nkmp->max_rate) {
  109. rate = nkmp->max_rate;
  110. if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
  111. rate /= nkmp->fixed_post_div;
  112. return rate;
  113. }
  114. _nkmp.min_n = nkmp->n.min ?: 1;
  115. _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
  116. _nkmp.min_k = nkmp->k.min ?: 1;
  117. _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
  118. _nkmp.min_m = 1;
  119. _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
  120. _nkmp.min_p = 1;
  121. _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
  122. ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
  123. rate = ccu_nkmp_calc_rate(*parent_rate, _nkmp.n, _nkmp.k,
  124. _nkmp.m, _nkmp.p);
  125. if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
  126. rate = rate / nkmp->fixed_post_div;
  127. return rate;
  128. }
/*
 * Program the PLL factors for the requested rate.
 *
 * Re-runs the same factor search as ccu_nkmp_round_rate(), then
 * read-modify-writes the factor bitfields under the CCU spinlock and
 * finally waits for the PLL to report lock.  Always returns 0.
 */
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
	struct _ccu_nkmp _nkmp;
	unsigned long flags;
	u32 reg;

	/* Convert the target back into pre-postdiv (VCO) space */
	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nkmp->fixed_post_div;

	/* A zero min/max bound means "use the full range of the field" */
	_nkmp.min_n = nkmp->n.min ?: 1;
	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
	_nkmp.min_k = nkmp->k.min ?: 1;
	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
	_nkmp.min_m = 1;
	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
	_nkmp.min_p = 1;
	/* P is a log2 exponent, so its max divider is 2^(2^width - 1) */
	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

	/*
	 * If width is 0, GENMASK() macro may not generate expected mask (0)
	 * as it falls under undefined behaviour by C standard due to shifts
	 * which are equal or greater than width of left operand. This can
	 * be easily avoided by explicitly checking if width is 0.
	 */
	if (nkmp->n.width)
		n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
				 nkmp->n.shift);
	if (nkmp->k.width)
		k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
				 nkmp->k.shift);
	if (nkmp->m.width)
		m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
				 nkmp->m.shift);
	if (nkmp->p.width)
		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
				 nkmp->p.shift);

	spin_lock_irqsave(nkmp->common.lock, flags);

	/* Read-modify-write: clear only the factor fields, keep the rest */
	reg = readl(nkmp->common.base + nkmp->common.reg);
	reg &= ~(n_mask | k_mask | m_mask | p_mask);

	reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
	reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
	reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
	/* P is programmed as a log2 exponent */
	reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

	writel(reg, nkmp->common.base + nkmp->common.reg);

	spin_unlock_irqrestore(nkmp->common.lock, flags);

	ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

	return 0;
}
/* clk_ops for NKMP-style PLLs, used by the per-SoC sunxi-ng CCU drivers */
const struct clk_ops ccu_nkmp_ops = {
	.disable	= ccu_nkmp_disable,
	.enable		= ccu_nkmp_enable,
	.is_enabled	= ccu_nkmp_is_enabled,
	.recalc_rate	= ccu_nkmp_recalc_rate,
	.round_rate	= ccu_nkmp_round_rate,
	.set_rate	= ccu_nkmp_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);