clk-half-divider.c 5.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
  4. */
  5. #include <linux/clk-provider.h>
  6. #include <linux/io.h>
  7. #include <linux/slab.h>
  8. #include "clk.h"
/* Bitmask covering a divider field of the given width, e.g. width=5 -> 0x1f. */
#define div_mask(width)	((1 << (width)) - 1)
  10. static bool _is_best_half_div(unsigned long rate, unsigned long now,
  11. unsigned long best, unsigned long flags)
  12. {
  13. if (flags & CLK_DIVIDER_ROUND_CLOSEST)
  14. return abs(rate - now) < abs(rate - best);
  15. return now <= rate && now > best;
  16. }
  17. static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
  18. unsigned long parent_rate)
  19. {
  20. struct clk_divider *divider = to_clk_divider(hw);
  21. unsigned int val;
  22. val = readl(divider->reg) >> divider->shift;
  23. val &= div_mask(divider->width);
  24. val = val * 2 + 3;
  25. return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
  26. }
/*
 * Find the divider field value that best approximates @rate.
 *
 * The hardware field value i encodes the divisor (2i + 3) / 2.  Returns the
 * best field value and, when the parent rate may be changed, updates
 * *best_parent_rate to the parent rate that should be used with it.
 */
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	/* Guard against division by zero below. */
	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		/*
		 * Parent rate is fixed: directly compute the field value.
		 * divisor = parent * 2 / rate, then invert the (2i + 3)
		 * encoding; divisors below 3 map to field 0 (divide by 1.5).
		 */
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		/* Ask the parent how close it can get to the rate we'd need. */
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));
		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		/* No candidate found: fall back to the maximum division. */
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}
  78. static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
  79. unsigned long *prate)
  80. {
  81. struct clk_divider *divider = to_clk_divider(hw);
  82. int div;
  83. div = clk_half_divider_bestdiv(hw, rate, prate,
  84. divider->width,
  85. divider->flags);
  86. return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
  87. }
/*
 * Program the divider field so the output is as close as possible to @rate.
 *
 * Inverts the (2n + 3) / 2 encoding: n = (parent * 2 / rate - 3) / 2,
 * clamped to the field width.  The register update is done under the
 * optional spinlock; the __acquire/__release calls keep sparse's context
 * tracking balanced when no lock is provided.
 */
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = (value - 3) / 2;
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		/* Hiword mask: write-enable bits live in the upper 16 bits. */
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		/* Plain read-modify-write of the divider field. */
		val = readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}
/* Clock ops for the Rockchip half divider (divisor = (2n + 3) / 2). */
static const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};
  121. /*
  122. * Register a clock branch.
  123. * Most clock branches have a form like
  124. *
  125. * src1 --|--\
  126. * |M |--[GATE]-[DIV]-
  127. * src2 --|--/
  128. *
  129. * sometimes without one of those components.
  130. */
  131. struct clk *rockchip_clk_register_halfdiv(const char *name,
  132. const char *const *parent_names,
  133. u8 num_parents, void __iomem *base,
  134. int muxdiv_offset, u8 mux_shift,
  135. u8 mux_width, u8 mux_flags,
  136. u8 div_shift, u8 div_width,
  137. u8 div_flags, int gate_offset,
  138. u8 gate_shift, u8 gate_flags,
  139. unsigned long flags,
  140. spinlock_t *lock)
  141. {
  142. struct clk_hw *hw = ERR_PTR(-ENOMEM);
  143. struct clk_mux *mux = NULL;
  144. struct clk_gate *gate = NULL;
  145. struct clk_divider *div = NULL;
  146. const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
  147. *gate_ops = NULL;
  148. if (num_parents > 1) {
  149. mux = kzalloc(sizeof(*mux), GFP_KERNEL);
  150. if (!mux)
  151. return ERR_PTR(-ENOMEM);
  152. mux->reg = base + muxdiv_offset;
  153. mux->shift = mux_shift;
  154. mux->mask = BIT(mux_width) - 1;
  155. mux->flags = mux_flags;
  156. mux->lock = lock;
  157. mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
  158. : &clk_mux_ops;
  159. }
  160. if (gate_offset >= 0) {
  161. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  162. if (!gate)
  163. goto err_gate;
  164. gate->flags = gate_flags;
  165. gate->reg = base + gate_offset;
  166. gate->bit_idx = gate_shift;
  167. gate->lock = lock;
  168. gate_ops = &clk_gate_ops;
  169. }
  170. if (div_width > 0) {
  171. div = kzalloc(sizeof(*div), GFP_KERNEL);
  172. if (!div)
  173. goto err_div;
  174. div->flags = div_flags;
  175. div->reg = base + muxdiv_offset;
  176. div->shift = div_shift;
  177. div->width = div_width;
  178. div->lock = lock;
  179. div_ops = &clk_half_divider_ops;
  180. }
  181. hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
  182. mux ? &mux->hw : NULL, mux_ops,
  183. div ? &div->hw : NULL, div_ops,
  184. gate ? &gate->hw : NULL, gate_ops,
  185. flags);
  186. if (IS_ERR(hw))
  187. goto err_div;
  188. return hw->clk;
  189. err_div:
  190. kfree(gate);
  191. err_gate:
  192. kfree(mux);
  193. return ERR_CAST(hw);
  194. }